Merge "bitstream: fix logical-not-parentheses warning"
diff --git a/libs.mk b/libs.mk
index ee4c750..7a88b92 100644
--- a/libs.mk
+++ b/libs.mk
@@ -69,7 +69,7 @@
   INSTALL-LIBS-$(CONFIG_SPATIAL_SVC) += include/vpx/svc_context.h
   INSTALL_MAPS += include/vpx/% $(SRC_PATH_BARE)/$(VP10_PREFIX)/%
   CODEC_DOC_SRCS += vpx/vp8.h vpx/vp8cx.h
-  CODEC_DOC_SECTIONS += vp9 vp9_encoder
+  CODEC_DOC_SECTIONS += vp10 vp10_encoder
 endif
 
 ifeq ($(CONFIG_VP10_DECODER),yes)
@@ -81,7 +81,7 @@
   INSTALL-LIBS-yes += include/vpx/vp8.h include/vpx/vp8dx.h
   INSTALL_MAPS += include/vpx/% $(SRC_PATH_BARE)/$(VP10_PREFIX)/%
   CODEC_DOC_SRCS += vpx/vp8.h vpx/vp8dx.h
-  CODEC_DOC_SECTIONS += vp9 vp9_decoder
+  CODEC_DOC_SECTIONS += vp10 vp10_decoder
 endif
 
 VP10_PREFIX=vp10/
diff --git a/test/arf_freq_test.cc b/test/arf_freq_test.cc
index 761e7b5..0e48f7a 100644
--- a/test/arf_freq_test.cc
+++ b/test/arf_freq_test.cc
@@ -66,7 +66,7 @@
 
 const int kMinArfVectors[] = {
   // NOTE: 0 refers to the default built-in logic in:
-  //       vp9_rc_get_default_min_gf_interval(...)
+  //       vp10_rc_get_default_min_gf_interval(...)
   0, 4, 8, 12, 15
 };
 
diff --git a/test/boolcoder_test.cc b/test/boolcoder_test.cc
index c61bb4a..2bc56f9 100644
--- a/test/boolcoder_test.cc
+++ b/test/boolcoder_test.cc
@@ -25,7 +25,7 @@
 const int num_tests = 10;
 }  // namespace
 
-TEST(VP9, TestBitIO) {
+TEST(VP10, TestBitIO) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   for (int n = 0; n < num_tests; ++n) {
     for (int method = 0; method <= 7; ++method) {   // we generate various proba
diff --git a/test/test-data.mk b/test/test-data.mk
index 381a183..768812a 100644
--- a/test/test-data.mk
+++ b/test/test-data.mk
@@ -18,99 +18,9 @@
 LIBVPX_TEST_DATA-$(CONFIG_ENCODERS) += park_joy_90p_8_444.y4m
 LIBVPX_TEST_DATA-$(CONFIG_ENCODERS) += park_joy_90p_8_440.yuv
 
-LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += desktop_credits.y4m
 LIBVPX_TEST_DATA-$(CONFIG_VP10_ENCODER) += niklas_1280_720_30.y4m
 LIBVPX_TEST_DATA-$(CONFIG_VP10_ENCODER) += rush_hour_444.y4m
-LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += screendata.y4m
-
-ifeq ($(CONFIG_DECODE_PERF_TESTS),yes)
-# Encode / Decode test
-LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += niklas_1280_720_30.yuv
-# BBB VP9 streams
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-bbb_426x240_tile_1x1_180kbps.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-bbb_640x360_tile_1x2_337kbps.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-bbb_854x480_tile_1x2_651kbps.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-bbb_1280x720_tile_1x4_1310kbps.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-bbb_1920x1080_tile_1x1_2581kbps.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-bbb_1920x1080_tile_1x4_2586kbps.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-bbb_1920x1080_tile_1x4_fpm_2304kbps.webm
-# Sintel VP9 streams
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-sintel_426x182_tile_1x1_171kbps.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-sintel_640x272_tile_1x2_318kbps.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-sintel_854x364_tile_1x2_621kbps.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-sintel_1280x546_tile_1x4_1257kbps.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-sintel_1920x818_tile_1x4_fpm_2279kbps.webm
-# TOS VP9 streams
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-tos_426x178_tile_1x1_181kbps.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-tos_640x266_tile_1x2_336kbps.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-tos_854x356_tile_1x2_656kbps.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-tos_854x356_tile_1x2_fpm_546kbps.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-tos_1280x534_tile_1x4_1306kbps.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-tos_1280x534_tile_1x4_fpm_952kbps.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-tos_1920x800_tile_1x4_fpm_2335kbps.webm
-endif  # CONFIG_DECODE_PERF_TESTS
-
-ifeq ($(CONFIG_ENCODE_PERF_TESTS),yes)
-LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += desktop_640_360_30.yuv
-LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += kirland_640_480_30.yuv
-LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += macmarcomoving_640_480_30.yuv
-LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += macmarcostationary_640_480_30.yuv
-LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += niklas_1280_720_30.yuv
-LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += niklas_640_480_30.yuv
-LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += tacomanarrows_640_480_30.yuv
-LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += tacomasmallcameramovement_640_480_30.yuv
-LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += thaloundeskmtg_640_480_30.yuv
-endif  # CONFIG_ENCODE_PERF_TESTS
 
 # sort and remove duplicates
 LIBVPX_TEST_DATA-yes := $(sort $(LIBVPX_TEST_DATA-yes))
 
-# VP9 dynamic resizing test (decoder)
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x180_5_1-2.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x180_5_1-2.webm.md5
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x180_5_3-4.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x180_5_3-4.webm.md5
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x180_7_1-2.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x180_7_1-2.webm.md5
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x180_7_3-4.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x180_7_3-4.webm.md5
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x240_5_1-2.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x240_5_1-2.webm.md5
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x240_5_3-4.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x240_5_3-4.webm.md5
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x240_7_1-2.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x240_7_1-2.webm.md5
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x240_7_3-4.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_320x240_7_3-4.webm.md5
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x360_5_1-2.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x360_5_1-2.webm.md5
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x360_5_3-4.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x360_5_3-4.webm.md5
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x360_7_1-2.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x360_7_1-2.webm.md5
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x360_7_3-4.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x360_7_3-4.webm.md5
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x480_5_1-2.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x480_5_1-2.webm.md5
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x480_5_3-4.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x480_5_3-4.webm.md5
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x480_7_1-2.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x480_7_1-2.webm.md5
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x480_7_3-4.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_640x480_7_3-4.webm.md5
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1280x720_5_1-2.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1280x720_5_1-2.webm.md5
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1280x720_5_3-4.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1280x720_5_3-4.webm.md5
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1280x720_7_1-2.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1280x720_7_1-2.webm.md5
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1280x720_7_3-4.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1280x720_7_3-4.webm.md5
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1920x1080_5_1-2.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1920x1080_5_1-2.webm.md5
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1920x1080_5_3-4.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1920x1080_5_3-4.webm.md5
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1920x1080_7_1-2.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1920x1080_7_1-2.webm.md5
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1920x1080_7_3-4.webm
-LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-21-resize_inter_1920x1080_7_3-4.webm.md5
diff --git a/test/test-data.sha1 b/test/test-data.sha1
index c958444..3d9bfc7 100644
--- a/test/test-data.sha1
+++ b/test/test-data.sha1
@@ -1,19 +1,5 @@
 d5dfb0151c9051f8c85999255645d7a23916d3c0 *hantro_collage_w352h288.yuv
 b87815bf86020c592ccc7a846ba2e28ec8043902 *hantro_odd.yuv
-76024eb753cdac6a5e5703aaea189d35c3c30ac7 *invalid-vp90-2-00-quantizer-00.webm.ivf.s5861_r01-05_b6-.v2.ivf
-7448d8798a4380162d4b56f9b452e2f6f9e24e7a *invalid-vp90-2-00-quantizer-00.webm.ivf.s5861_r01-05_b6-.v2.ivf.res
-83f50908c8dc0ef8760595447a2ff7727489542e *invalid-vp90-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-.ivf
-456d1493e52d32a5c30edf44a27debc1fa6b253a *invalid-vp90-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-.ivf.res
-c123d1f9f02fb4143abb5e271916e3a3080de8f6 *invalid-vp90-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-z.ivf
-456d1493e52d32a5c30edf44a27debc1fa6b253a *invalid-vp90-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-z.ivf.res
-fe346136b9b8c1e6f6084cc106485706915795e4 *invalid-vp90-01-v3.webm
-5d9474c0309b7ca09a182d888f73b37a8fe1362c *invalid-vp90-01-v3.webm.res
-d78e2fceba5ac942246503ec8366f879c4775ca5 *invalid-vp90-02-v2.webm
-8e2eff4af87d2b561cce2365713269e301457ef3 *invalid-vp90-02-v2.webm.res
-df1a1453feb3c00d7d89746c7003b4163523bff3 *invalid-vp90-03-v3.webm
-4935c62becc68c13642a03db1e6d3e2331c1c612 *invalid-vp90-03-v3.webm.res
-d637297561dd904eb2c97a9015deeb31c4a1e8d2 *invalid-vp90-2-08-tile_1x4_frame_parallel_all_key.webm
-3a204bdbeaa3c6458b77bcebb8366d107267f55d *invalid-vp90-2-08-tile_1x4_frame_parallel_all_key.webm.res
 a432f96ff0a787268e2f94a8092ab161a18d1b06 *park_joy_90p_10_420.y4m
 0b194cc312c3a2e84d156a221b0a5eb615dfddc5 *park_joy_90p_10_422.y4m
 ff0e0a21dc2adc95b8c1b37902713700655ced17 *park_joy_90p_10_444.y4m
@@ -38,30 +24,5 @@
 9a70e8b7d14fba9234d0e51dce876635413ce444 *thaloundeskmtg_640_480_30.yuv
 e7d315dbf4f3928779e0dc624311196d44491d32 *niklas_1280_720_30.yuv
 717da707afcaa1f692ff1946f291054eb75a4f06 *screendata.y4m
-b7c1296630cdf1a7ef493d15ff4f9eb2999202f6 *invalid-vp90-2-08-tile_1x2_frame_parallel.webm.ivf.s47039_r01-05_b6-.ivf
-0a3884edb3fd8f9d9b500223e650f7de257b67d8 *invalid-vp90-2-08-tile_1x2_frame_parallel.webm.ivf.s47039_r01-05_b6-.ivf.res
-359e138dfb66863828397b77000ea7a83c844d02 *invalid-vp90-2-08-tile_1x8_frame_parallel.webm.ivf.s288_r01-05_b6-.ivf
-bbd33de01c17b165b4ce00308e8a19a942023ab8 *invalid-vp90-2-08-tile_1x8_frame_parallel.webm.ivf.s288_r01-05_b6-.ivf.res
-fac89b5735be8a86b0dc05159f996a5c3208ae32 *invalid-vp90-2-09-aq2.webm.ivf.s3984_r01-05_b6-.v2.ivf
-0a3884edb3fd8f9d9b500223e650f7de257b67d8 *invalid-vp90-2-09-aq2.webm.ivf.s3984_r01-05_b6-.v2.ivf.res
-4506dfdcdf8ee4250924b075a0dcf1f070f72e5a *invalid-vp90-2-09-subpixel-00.ivf.s19552_r01-05_b6-.v2.ivf
-bcdedaf168ac225575468fda77502d2dc9fd5baa *invalid-vp90-2-09-subpixel-00.ivf.s19552_r01-05_b6-.v2.ivf.res
-b03c408cf23158638da18dbc3323b99a1635c68a *invalid-vp90-2-12-droppable_1.ivf.s3676_r01-05_b6-.ivf
-0a3884edb3fd8f9d9b500223e650f7de257b67d8 *invalid-vp90-2-12-droppable_1.ivf.s3676_r01-05_b6-.ivf.res
-5e67e24e7f53fd189e565513cef8519b1bd6c712 *invalid-vp90-2-05-resize.ivf.s59293_r01-05_b6-.ivf
-741158f67c0d9d23726624d06bdc482ad368afc9 *invalid-vp90-2-05-resize.ivf.s59293_r01-05_b6-.ivf.res
-8b1f7bf7e86c0976d277f60e8fcd9539e75a079a *invalid-vp90-2-09-subpixel-00.ivf.s20492_r01-05_b6-.v2.ivf
-9c6bdf048fb2e66f07d4b4db5b32e6f303bd6109 *invalid-vp90-2-09-subpixel-00.ivf.s20492_r01-05_b6-.v2.ivf.res
-552e372e9b78127389fb06b34545df2cec15ba6d *invalid-vp91-2-mixedrefcsp-444to420.ivf
-a61774cf03fc584bd9f0904fc145253bb8ea6c4c *invalid-vp91-2-mixedrefcsp-444to420.ivf.res
-812d05a64a0d83c1b504d0519927ddc5a2cdb273 *invalid-vp90-2-12-droppable_1.ivf.s73804_r01-05_b6-.ivf
-1e472baaf5f6113459f0399a38a5a5e68d17799d *invalid-vp90-2-12-droppable_1.ivf.s73804_r01-05_b6-.ivf.res
-efd5a51d175cfdacd169ed23477729dc558030dc *invalid-vp90-2-07-frame_parallel-1.webm
-9f912712ec418be69adb910e2ca886a63c4cec08 *invalid-vp90-2-07-frame_parallel-2.webm
-445f5a53ca9555341852997ccdd480a51540bd14 *invalid-vp90-2-07-frame_parallel-3.webm
-d18c90709a0d03c82beadf10898b27d88fff719c *invalid-vp90-2-03-size-224x196.webm.ivf.s44156_r01-05_b6-.ivf
-d06285d109ecbaef63b0cbcc44d70a129186f51c *invalid-vp90-2-03-size-224x196.webm.ivf.s44156_r01-05_b6-.ivf.res
-e60d859b0ef2b331b21740cf6cb83fabe469b079 *invalid-vp90-2-03-size-202x210.webm.ivf.s113306_r01-05_b6-.ivf
-0ae808dca4d3c1152a9576e14830b6faa39f1b4a *invalid-vp90-2-03-size-202x210.webm.ivf.s113306_r01-05_b6-.ivf.res
 9cfc855459e7549fd015c79e8eca512b2f2cb7e3 *niklas_1280_720_30.y4m
 5b5763b388b1b52a81bb82b39f7ec25c4bd3d0e1 *desktop_credits.y4m
diff --git a/vp10/common/onyxc_int.h b/vp10/common/onyxc_int.h
index 701fad1..067b261 100644
--- a/vp10/common/onyxc_int.h
+++ b/vp10/common/onyxc_int.h
@@ -203,7 +203,7 @@
   MODE_INFO *mi;  /* Corresponds to upper left visible macroblock */
 
   // TODO(agrange): Move prev_mi into encoder structure.
-  // prev_mip and prev_mi will only be allocated in VP9 encoder.
+  // prev_mip and prev_mi will only be allocated in encoder.
   MODE_INFO *prev_mip; /* MODE_INFO array 'mip' from last decoded frame */
   MODE_INFO *prev_mi;  /* 'mi' from last frame (points into prev_mip) */
 
@@ -416,7 +416,7 @@
   xd->left_available  = (mi_col > tile->mi_col_start);
   if (xd->up_available) {
     xd->above_mi = xd->mi[-xd->mi_stride];
-    // above_mi may be NULL in VP9 encoder's first pass.
+    // above_mi may be NULL in encoder's first pass.
     xd->above_mbmi = xd->above_mi ? &xd->above_mi->mbmi : NULL;
   } else {
     xd->above_mi = NULL;
@@ -425,7 +425,7 @@
 
   if (xd->left_available) {
     xd->left_mi = xd->mi[-1];
-    // left_mi may be NULL in VP9 encoder's first pass.
+    // left_mi may be NULL in encoder's first pass.
     xd->left_mbmi = xd->left_mi ? &xd->left_mi->mbmi : NULL;
   } else {
     xd->left_mi = NULL;
diff --git a/vp10/common/vp10_inv_txfm.h b/vp10/common/vp10_inv_txfm.h
index e5b5889..7f71dd5 100644
--- a/vp10/common/vp10_inv_txfm.h
+++ b/vp10/common/vp10_inv_txfm.h
@@ -23,9 +23,9 @@
 
 static INLINE tran_low_t check_range(tran_high_t input) {
 #if CONFIG_COEFFICIENT_RANGE_CHECKING
-  // For valid VP9 input streams, intermediate stage coefficients should always
+  // For valid input streams, intermediate stage coefficients should always
   // stay within the range of a signed 16 bit integer. Coefficients can go out
-  // of this range for invalid/corrupt VP9 streams. However, strictly checking
+  // of this range for invalid/corrupt streams. However, strictly checking
   // this range for every intermediate coefficient can burdensome for a decoder,
   // therefore the following assertion is only enabled when configured with
   // --enable-coefficient-range-checking.
@@ -44,7 +44,7 @@
 static INLINE tran_low_t highbd_check_range(tran_high_t input,
                                             int bd) {
 #if CONFIG_COEFFICIENT_RANGE_CHECKING
-  // For valid highbitdepth VP9 streams, intermediate stage coefficients will
+  // For valid highbitdepth streams, intermediate stage coefficients will
   // stay within the ranges:
   // - 8 bit: signed 16 bit integer
   // - 10 bit: signed 18 bit integer
@@ -69,7 +69,7 @@
 #if CONFIG_EMULATE_HARDWARE
 // When CONFIG_EMULATE_HARDWARE is 1 the transform performs a
 // non-normative method to handle overflows. A stream that causes
-// overflows  in the inverse transform is considered invalid in VP9,
+// overflows in the inverse transform is considered invalid,
 // and a hardware implementer is free to choose any reasonable
 // method to handle overflows. However to aid in hardware
 // verification they can use a specific implementation of the
diff --git a/vp10/encoder/aq_cyclicrefresh.c b/vp10/encoder/aq_cyclicrefresh.c
index 660670c..7a36d61 100644
--- a/vp10/encoder/aq_cyclicrefresh.c
+++ b/vp10/encoder/aq_cyclicrefresh.c
@@ -407,7 +407,7 @@
     int mi_row = sb_row_index * MI_BLOCK_SIZE;
     int mi_col = sb_col_index * MI_BLOCK_SIZE;
     int qindex_thresh =
-        cpi->oxcf.content == VP9E_CONTENT_SCREEN
+        cpi->oxcf.content == VPX_CONTENT_SCREEN
             ? vp10_get_qindex(&cm->seg, CR_SEGMENT_ID_BOOST2, cm->base_qindex)
             : 0;
     assert(mi_row >= 0 && mi_row < cm->mi_rows);
diff --git a/vp10/encoder/dct.c b/vp10/encoder/dct.c
index 701871b..6fb7870 100644
--- a/vp10/encoder/dct.c
+++ b/vp10/encoder/dct.c
@@ -151,180 +151,148 @@
   range_check(output, 8, 16);
 }
 
-static void fdct16(const tran_low_t *input, tran_low_t *output) {
-  tran_high_t temp;
-  tran_low_t step[16];
+static void fdct16(const tran_low_t in[16], tran_low_t out[16]) {
+  tran_high_t step1[8];
+  tran_high_t step2[8];
+  tran_high_t step3[8];
+  tran_high_t input[8];
+  tran_high_t temp1, temp2;
 
-  // stage 0
-  range_check(input, 16, 13);
+  // step 1
+  input[0] = in[0] + in[15];
+  input[1] = in[1] + in[14];
+  input[2] = in[2] + in[13];
+  input[3] = in[3] + in[12];
+  input[4] = in[4] + in[11];
+  input[5] = in[5] + in[10];
+  input[6] = in[6] + in[ 9];
+  input[7] = in[7] + in[ 8];
 
-  // stage 1
-  output[0] = input[0] + input[15];
-  output[1] = input[1] + input[14];
-  output[2] = input[2] + input[13];
-  output[3] = input[3] + input[12];
-  output[4] = input[4] + input[11];
-  output[5] = input[5] + input[10];
-  output[6] = input[6] + input[9];
-  output[7] = input[7] + input[8];
-  output[8] = input[7] - input[8];
-  output[9] = input[6] - input[9];
-  output[10] = input[5] - input[10];
-  output[11] = input[4] - input[11];
-  output[12] = input[3] - input[12];
-  output[13] = input[2] - input[13];
-  output[14] = input[1] - input[14];
-  output[15] = input[0] - input[15];
+  step1[0] = in[7] - in[ 8];
+  step1[1] = in[6] - in[ 9];
+  step1[2] = in[5] - in[10];
+  step1[3] = in[4] - in[11];
+  step1[4] = in[3] - in[12];
+  step1[5] = in[2] - in[13];
+  step1[6] = in[1] - in[14];
+  step1[7] = in[0] - in[15];
 
-  range_check(output, 16, 14);
+  // fdct8(step, step);
+  {
+    tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
+    tran_high_t t0, t1, t2, t3;
+    tran_high_t x0, x1, x2, x3;
 
-  // stage 2
-  step[0] = output[0] + output[7];
-  step[1] = output[1] + output[6];
-  step[2] = output[2] + output[5];
-  step[3] = output[3] + output[4];
-  step[4] = output[3] - output[4];
-  step[5] = output[2] - output[5];
-  step[6] = output[1] - output[6];
-  step[7] = output[0] - output[7];
-  step[8] = output[8];
-  step[9] = output[9];
-  temp = output[10] * -cospi_16_64 + output[13] * cospi_16_64;
-  step[10] = (tran_low_t)fdct_round_shift(temp);
-  temp = output[11] * -cospi_16_64 + output[12] * cospi_16_64;
-  step[11] = (tran_low_t)fdct_round_shift(temp);
-  temp = output[12] * cospi_16_64 + output[11] * cospi_16_64;
-  step[12] = (tran_low_t)fdct_round_shift(temp);
-  temp = output[13] * cospi_16_64 + output[10] * cospi_16_64;
-  step[13] = (tran_low_t)fdct_round_shift(temp);
-  step[14] = output[14];
-  step[15] = output[15];
+    // stage 1
+    s0 = input[0] + input[7];
+    s1 = input[1] + input[6];
+    s2 = input[2] + input[5];
+    s3 = input[3] + input[4];
+    s4 = input[3] - input[4];
+    s5 = input[2] - input[5];
+    s6 = input[1] - input[6];
+    s7 = input[0] - input[7];
 
-  range_check(step, 16, 15);
+    // fdct4(step, step);
+    x0 = s0 + s3;
+    x1 = s1 + s2;
+    x2 = s1 - s2;
+    x3 = s0 - s3;
+    t0 = (x0 + x1) * cospi_16_64;
+    t1 = (x0 - x1) * cospi_16_64;
+    t2 = x3 * cospi_8_64 + x2 * cospi_24_64;
+    t3 = x3 * cospi_24_64 - x2 * cospi_8_64;
+    out[0] = (tran_low_t)fdct_round_shift(t0);
+    out[4] = (tran_low_t)fdct_round_shift(t2);
+    out[8] = (tran_low_t)fdct_round_shift(t1);
+    out[12] = (tran_low_t)fdct_round_shift(t3);
 
-  // stage 3
-  output[0] = step[0] + step[3];
-  output[1] = step[1] + step[2];
-  output[2] = step[1] - step[2];
-  output[3] = step[0] - step[3];
-  output[4] = step[4];
-  temp = step[5] * -cospi_16_64 + step[6] * cospi_16_64;
-  output[5] = (tran_low_t)fdct_round_shift(temp);
-  temp = step[6] * cospi_16_64 + step[5] * cospi_16_64;
-  output[6] = (tran_low_t)fdct_round_shift(temp);
-  output[7] = step[7];
-  output[8] = step[8] + step[11];
-  output[9] = step[9] + step[10];
-  output[10] = step[9] - step[10];
-  output[11] = step[8] - step[11];
-  output[12] = step[15] - step[12];
-  output[13] = step[14] - step[13];
-  output[14] = step[14] + step[13];
-  output[15] = step[15] + step[12];
+    // Stage 2
+    t0 = (s6 - s5) * cospi_16_64;
+    t1 = (s6 + s5) * cospi_16_64;
+    t2 = fdct_round_shift(t0);
+    t3 = fdct_round_shift(t1);
 
-  range_check(output, 16, 16);
+    // Stage 3
+    x0 = s4 + t2;
+    x1 = s4 - t2;
+    x2 = s7 - t3;
+    x3 = s7 + t3;
 
-  // stage 4
-  temp = output[0] * cospi_16_64 + output[1] * cospi_16_64;
-  step[0] = (tran_low_t)fdct_round_shift(temp);
-  temp = output[1] * -cospi_16_64 + output[0] * cospi_16_64;
-  step[1] = (tran_low_t)fdct_round_shift(temp);
-  temp = output[2] * cospi_24_64 + output[3] * cospi_8_64;
-  step[2] = (tran_low_t)fdct_round_shift(temp);
-  temp = output[3] * cospi_24_64 + output[2] * -cospi_8_64;
-  step[3] = (tran_low_t)fdct_round_shift(temp);
-  step[4] = output[4] + output[5];
-  step[5] = output[4] - output[5];
-  step[6] = output[7] - output[6];
-  step[7] = output[7] + output[6];
-  step[8] = output[8];
-  temp = output[9] * -cospi_8_64 + output[14] * cospi_24_64;
-  step[9] = (tran_low_t)fdct_round_shift(temp);
-  temp = output[10] * -cospi_24_64 + output[13] * -cospi_8_64;
-  step[10] = (tran_low_t)fdct_round_shift(temp);
-  step[11] = output[11];
-  step[12] = output[12];
-  temp = output[13] * cospi_24_64 + output[10] * -cospi_8_64;
-  step[13] = (tran_low_t)fdct_round_shift(temp);
-  temp = output[14] * cospi_8_64 + output[9] * cospi_24_64;
-  step[14] = (tran_low_t)fdct_round_shift(temp);
-  step[15] = output[15];
+    // Stage 4
+    t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
+    t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
+    t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
+    t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
+    out[2] = (tran_low_t)fdct_round_shift(t0);
+    out[6] = (tran_low_t)fdct_round_shift(t2);
+    out[10] = (tran_low_t)fdct_round_shift(t1);
+    out[14] = (tran_low_t)fdct_round_shift(t3);
+  }
 
-  range_check(step, 16, 16);
+  // step 2
+  temp1 = (step1[5] - step1[2]) * cospi_16_64;
+  temp2 = (step1[4] - step1[3]) * cospi_16_64;
+  step2[2] = fdct_round_shift(temp1);
+  step2[3] = fdct_round_shift(temp2);
+  temp1 = (step1[4] + step1[3]) * cospi_16_64;
+  temp2 = (step1[5] + step1[2]) * cospi_16_64;
+  step2[4] = fdct_round_shift(temp1);
+  step2[5] = fdct_round_shift(temp2);
 
-  // stage 5
-  output[0] = step[0];
-  output[1] = step[1];
-  output[2] = step[2];
-  output[3] = step[3];
-  temp = step[4] * cospi_28_64 + step[7] * cospi_4_64;
-  output[4] = (tran_low_t)fdct_round_shift(temp);
-  temp = step[5] * cospi_12_64 + step[6] * cospi_20_64;
-  output[5] = (tran_low_t)fdct_round_shift(temp);
-  temp = step[6] * cospi_12_64 + step[5] * -cospi_20_64;
-  output[6] = (tran_low_t)fdct_round_shift(temp);
-  temp = step[7] * cospi_28_64 + step[4] * -cospi_4_64;
-  output[7] = (tran_low_t)fdct_round_shift(temp);
-  output[8] = step[8] + step[9];
-  output[9] = step[8] - step[9];
-  output[10] = step[11] - step[10];
-  output[11] = step[11] + step[10];
-  output[12] = step[12] + step[13];
-  output[13] = step[12] - step[13];
-  output[14] = step[15] - step[14];
-  output[15] = step[15] + step[14];
+  // step 3
+  step3[0] = step1[0] + step2[3];
+  step3[1] = step1[1] + step2[2];
+  step3[2] = step1[1] - step2[2];
+  step3[3] = step1[0] - step2[3];
+  step3[4] = step1[7] - step2[4];
+  step3[5] = step1[6] - step2[5];
+  step3[6] = step1[6] + step2[5];
+  step3[7] = step1[7] + step2[4];
 
-  range_check(output, 16, 16);
+  // step 4
+  temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64;
+  temp2 = step3[2] * cospi_24_64 + step3[5] * cospi_8_64;
+  step2[1] = fdct_round_shift(temp1);
+  step2[2] = fdct_round_shift(temp2);
+  temp1 = step3[2] * cospi_8_64 - step3[5] * cospi_24_64;
+  temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64;
+  step2[5] = fdct_round_shift(temp1);
+  step2[6] = fdct_round_shift(temp2);
 
-  // stage 6
-  step[0] = output[0];
-  step[1] = output[1];
-  step[2] = output[2];
-  step[3] = output[3];
-  step[4] = output[4];
-  step[5] = output[5];
-  step[6] = output[6];
-  step[7] = output[7];
-  temp = output[8] * cospi_30_64 + output[15] * cospi_2_64;
-  step[8] = (tran_low_t)fdct_round_shift(temp);
-  temp = output[9] * cospi_14_64 + output[14] * cospi_18_64;
-  step[9] = (tran_low_t)fdct_round_shift(temp);
-  temp = output[10] * cospi_22_64 + output[13] * cospi_10_64;
-  step[10] = (tran_low_t)fdct_round_shift(temp);
-  temp = output[11] * cospi_6_64 + output[12] * cospi_26_64;
-  step[11] = (tran_low_t)fdct_round_shift(temp);
-  temp = output[12] * cospi_6_64 + output[11] * -cospi_26_64;
-  step[12] = (tran_low_t)fdct_round_shift(temp);
-  temp = output[13] * cospi_22_64 + output[10] * -cospi_10_64;
-  step[13] = (tran_low_t)fdct_round_shift(temp);
-  temp = output[14] * cospi_14_64 + output[9] * -cospi_18_64;
-  step[14] = (tran_low_t)fdct_round_shift(temp);
-  temp = output[15] * cospi_30_64 + output[8] * -cospi_2_64;
-  step[15] = (tran_low_t)fdct_round_shift(temp);
+  // step 5
+  step1[0] = step3[0] + step2[1];
+  step1[1] = step3[0] - step2[1];
+  step1[2] = step3[3] + step2[2];
+  step1[3] = step3[3] - step2[2];
+  step1[4] = step3[4] - step2[5];
+  step1[5] = step3[4] + step2[5];
+  step1[6] = step3[7] - step2[6];
+  step1[7] = step3[7] + step2[6];
 
-  range_check(step, 16, 16);
+  // step 6
+  temp1 = step1[0] * cospi_30_64 + step1[7] * cospi_2_64;
+  temp2 = step1[1] * cospi_14_64 + step1[6] * cospi_18_64;
+  out[1] = (tran_low_t)fdct_round_shift(temp1);
+  out[9] = (tran_low_t)fdct_round_shift(temp2);
 
-  // stage 7
-  output[0] = step[0];
-  output[1] = step[8];
-  output[2] = step[4];
-  output[3] = step[12];
-  output[4] = step[2];
-  output[5] = step[10];
-  output[6] = step[6];
-  output[7] = step[14];
-  output[8] = step[1];
-  output[9] = step[9];
-  output[10] = step[5];
-  output[11] = step[13];
-  output[12] = step[3];
-  output[13] = step[11];
-  output[14] = step[7];
-  output[15] = step[15];
+  temp1 = step1[2] * cospi_22_64 + step1[5] * cospi_10_64;
+  temp2 = step1[3] * cospi_6_64 + step1[4] * cospi_26_64;
+  out[5] = (tran_low_t)fdct_round_shift(temp1);
+  out[13] = (tran_low_t)fdct_round_shift(temp2);
 
-  range_check(output, 16, 16);
+  temp1 = step1[3] * -cospi_26_64 + step1[4] * cospi_6_64;
+  temp2 = step1[2] * -cospi_10_64 + step1[5] * cospi_22_64;
+  out[3] = (tran_low_t)fdct_round_shift(temp1);
+  out[11] = (tran_low_t)fdct_round_shift(temp2);
+
+  temp1 = step1[1] * -cospi_18_64 + step1[6] * cospi_14_64;
+  temp2 = step1[0] * -cospi_2_64 + step1[7] * cospi_30_64;
+  out[7] = (tran_low_t)fdct_round_shift(temp1);
+  out[15] = (tran_low_t)fdct_round_shift(temp2);
 }
 
+
 /* TODO(angiebird): Unify this with vp10_fwd_txfm.c: vp10_fdct32
 static void fdct32(const tran_low_t *input, tran_low_t *output) {
   tran_high_t temp;
diff --git a/vp10/encoder/encoder.h b/vp10/encoder/encoder.h
index 73b4343..87219ed 100644
--- a/vp10/encoder/encoder.h
+++ b/vp10/encoder/encoder.h
@@ -219,8 +219,8 @@
   vpx_fixed_buf_t firstpass_mb_stats_in;
 #endif
 
-  vp8e_tuning tuning;
-  vp9e_tune_content content;
+  vpx_tune_metric tuning;
+  vpx_tune_content content;
 #if CONFIG_VPX_HIGHBITDEPTH
   int use_highbitdepth;
 #endif
diff --git a/vp10/encoder/speed_features.c b/vp10/encoder/speed_features.c
index ce0aebe..b82dec0 100644
--- a/vp10/encoder/speed_features.c
+++ b/vp10/encoder/speed_features.c
@@ -261,7 +261,7 @@
 }
 
 static void set_rt_speed_feature(VP10_COMP *cpi, SPEED_FEATURES *sf,
-                                 int speed, vp9e_tune_content content) {
+                                 int speed, vpx_tune_content content) {
   VP10_COMMON *const cm = &cpi->common;
   const int is_keyframe = cm->frame_type == KEY_FRAME;
   const int frames_since_key = is_keyframe ? 0 : cpi->rc.frames_since_key;
@@ -372,7 +372,7 @@
 
     if (!is_keyframe) {
       int i;
-      if (content == VP9E_CONTENT_SCREEN) {
+      if (content == VPX_CONTENT_SCREEN) {
         for (i = 0; i < BLOCK_SIZES; ++i)
           sf->intra_y_mode_bsize_mask[i] = INTRA_DC_TM_H_V;
       } else {
diff --git a/vp10/vp10_common.mk b/vp10/vp10_common.mk
index f60c07c..5d8f0c4 100644
--- a/vp10/vp10_common.mk
+++ b/vp10/vp10_common.mk
@@ -46,7 +46,6 @@
 VP10_COMMON_SRCS-yes += common/scale.c
 VP10_COMMON_SRCS-yes += common/seg_common.h
 VP10_COMMON_SRCS-yes += common/seg_common.c
-VP10_COMMON_SRCS-yes += common/textblit.h
 VP10_COMMON_SRCS-yes += common/tile_common.h
 VP10_COMMON_SRCS-yes += common/tile_common.c
 VP10_COMMON_SRCS-yes += common/loopfilter.c
diff --git a/vp10/vp10_cx_iface.c b/vp10/vp10_cx_iface.c
index aabfe89..9e38e23 100644
--- a/vp10/vp10_cx_iface.c
+++ b/vp10/vp10_cx_iface.c
@@ -33,7 +33,7 @@
   unsigned int                arnr_strength;
   unsigned int                min_gf_interval;
   unsigned int                max_gf_interval;
-  vp8e_tuning                 tuning;
+  vpx_tune_metric             tuning;
   unsigned int                cq_level;  // constrained quality level
   unsigned int                rc_max_intra_bitrate_pct;
   unsigned int                rc_max_inter_bitrate_pct;
@@ -43,7 +43,7 @@
   AQ_MODE                     aq_mode;
   unsigned int                frame_periodic_boost;
   vpx_bit_depth_t             bit_depth;
-  vp9e_tune_content           content;
+  vpx_tune_content            content;
   vpx_color_space_t           color_space;
   int                         color_range;
   int                         render_width;
@@ -62,7 +62,7 @@
   5,                          // arnr_strength
   0,                          // min_gf_interval; 0 -> default decision
   0,                          // max_gf_interval; 0 -> default decision
-  VP8_TUNE_PSNR,              // tuning
+  VPX_TUNE_PSNR,              // tuning
   10,                         // cq_level
   0,                          // rc_max_intra_bitrate_pct
   0,                          // rc_max_inter_bitrate_pct
@@ -72,7 +72,7 @@
   NO_AQ,                      // aq_mode
   0,                          // frame_periodic_delta_q
   VPX_BITS_8,                 // Bit depth
-  VP9E_CONTENT_DEFAULT,       // content
+  VPX_CONTENT_DEFAULT,        // content
   VPX_CS_UNKNOWN,             // color space
   0,                          // color range
   0,                          // render width
@@ -218,10 +218,10 @@
   RANGE_CHECK(cfg, g_bit_depth, VPX_BITS_8, VPX_BITS_12);
   RANGE_CHECK(cfg, g_input_bit_depth, 8, 12);
   RANGE_CHECK(extra_cfg, content,
-              VP9E_CONTENT_DEFAULT, VP9E_CONTENT_INVALID - 1);
+              VPX_CONTENT_DEFAULT, VPX_CONTENT_INVALID - 1);
 
   // TODO(yaowu): remove this when ssim tuning is implemented for vp9
-  if (extra_cfg->tuning == VP8_TUNE_SSIM)
+  if (extra_cfg->tuning == VPX_TUNE_SSIM)
       ERROR("Option --tune=ssim is not currently supported in VP9.");
 
   if (cfg->g_pass == VPX_RC_LAST_PASS) {
diff --git a/vpx/vp8cx.h b/vpx/vp8cx.h
index 3cbb0ec..3344307 100644
--- a/vpx/vp8cx.h
+++ b/vpx/vp8cx.h
@@ -441,8 +441,8 @@
 
   /*!\brief Codec control function to set content type.
    * \note Valid parameter range:
-   *              VP9E_CONTENT_DEFAULT = Regular video content (Default)
-   *              VP9E_CONTENT_SCREEN  = Screen capture content
+   *              VPX_CONTENT_DEFAULT = Regular video content (Default)
+   *              VPX_CONTENT_SCREEN  = Screen capture content
    *
    * Supported in codecs: VP9
    */
@@ -635,10 +635,10 @@
 
 /*!brief VP9 encoder content type */
 typedef enum {
-  VP9E_CONTENT_DEFAULT,
-  VP9E_CONTENT_SCREEN,
-  VP9E_CONTENT_INVALID
-} vp9e_tune_content;
+  VPX_CONTENT_DEFAULT,
+  VPX_CONTENT_SCREEN,
+  VPX_CONTENT_INVALID
+} vpx_tune_content;
 
 /*!\brief VP8 model tuning parameters
  *
@@ -646,9 +646,9 @@
  *
  */
 typedef enum {
-  VP8_TUNE_PSNR,
-  VP8_TUNE_SSIM
-} vp8e_tuning;
+  VPX_TUNE_PSNR,
+  VPX_TUNE_SSIM
+} vpx_tune_metric;
 
 /*!\brief  vp9 svc layer parameters
  *
@@ -724,7 +724,7 @@
 #define VPX_CTRL_VP8E_SET_ARNR_STRENGTH
 VPX_CTRL_USE_TYPE_DEPRECATED(VP8E_SET_ARNR_TYPE,     unsigned int)
 #define VPX_CTRL_VP8E_SET_ARNR_TYPE
-VPX_CTRL_USE_TYPE(VP8E_SET_TUNING,             int) /* vp8e_tuning */
+VPX_CTRL_USE_TYPE(VP8E_SET_TUNING,             int) /* vpx_tune_metric */
 #define VPX_CTRL_VP8E_SET_TUNING
 VPX_CTRL_USE_TYPE(VP8E_SET_CQ_LEVEL,      unsigned int)
 #define VPX_CTRL_VP8E_SET_CQ_LEVEL
@@ -767,7 +767,7 @@
 VPX_CTRL_USE_TYPE(VP9E_SET_NOISE_SENSITIVITY,  unsigned int)
 #define VPX_CTRL_VP9E_SET_NOISE_SENSITIVITY
 
-VPX_CTRL_USE_TYPE(VP9E_SET_TUNE_CONTENT, int) /* vp9e_tune_content */
+VPX_CTRL_USE_TYPE(VP9E_SET_TUNE_CONTENT, int) /* vpx_tune_content */
 #define VPX_CTRL_VP9E_SET_TUNE_CONTENT
 
 VPX_CTRL_USE_TYPE(VP9E_SET_COLOR_SPACE, int)
diff --git a/vpx_dsp/inv_txfm.h b/vpx_dsp/inv_txfm.h
index cc689f6..3092afa 100644
--- a/vpx_dsp/inv_txfm.h
+++ b/vpx_dsp/inv_txfm.h
@@ -23,9 +23,9 @@
 
 static INLINE tran_low_t check_range(tran_high_t input) {
 #if CONFIG_COEFFICIENT_RANGE_CHECKING
-  // For valid VP9 input streams, intermediate stage coefficients should always
+  // For valid input streams, intermediate stage coefficients should always
   // stay within the range of a signed 16 bit integer. Coefficients can go out
-  // of this range for invalid/corrupt VP9 streams. However, strictly checking
+  // of this range for invalid/corrupt streams. However, strictly checking
   // this range for every intermediate coefficient can burdensome for a decoder,
   // therefore the following assertion is only enabled when configured with
   // --enable-coefficient-range-checking.
@@ -44,7 +44,7 @@
 static INLINE tran_low_t highbd_check_range(tran_high_t input,
                                             int bd) {
 #if CONFIG_COEFFICIENT_RANGE_CHECKING
-  // For valid highbitdepth VP9 streams, intermediate stage coefficients will
+  // For valid highbitdepth streams, intermediate stage coefficients will
   // stay within the ranges:
   // - 8 bit: signed 16 bit integer
   // - 10 bit: signed 18 bit integer
@@ -69,7 +69,7 @@
 #if CONFIG_EMULATE_HARDWARE
 // When CONFIG_EMULATE_HARDWARE is 1 the transform performs a
 // non-normative method to handle overflows. A stream that causes
-// overflows  in the inverse transform is considered invalid in VP9,
+// overflows  in the inverse transform is considered invalid,
 // and a hardware implementer is free to choose any reasonable
 // method to handle overflows. However to aid in hardware
 // verification they can use a specific implementation of the
diff --git a/vpx_dsp/mips/fwd_txfm_msa.c b/vpx_dsp/mips/fwd_txfm_msa.c
index f66dd5f..5df40f9 100644
--- a/vpx_dsp/mips/fwd_txfm_msa.c
+++ b/vpx_dsp/mips/fwd_txfm_msa.c
@@ -186,9 +186,9 @@
     in0 += vec;
   }
 
-  VP9_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
+  VPX_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
   TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
-  VP9_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
+  VPX_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
   TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
   ADD4(in0, 1, in1, 1, in2, 1, in3, 1, in0, in1, in2, in3);
   SRA_4V(in0, in1, in2, in3, 2);
@@ -203,11 +203,11 @@
   LD_SH8(input, src_stride, in0, in1, in2, in3, in4, in5, in6, in7);
   SLLI_4V(in0, in1, in2, in3, 2);
   SLLI_4V(in4, in5, in6, in7, 2);
-  VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
+  VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
             in0, in1, in2, in3, in4, in5, in6, in7);
   TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                      in0, in1, in2, in3, in4, in5, in6, in7);
-  VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
+  VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
             in0, in1, in2, in3, in4, in5, in6, in7);
   TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                      in0, in1, in2, in3, in4, in5, in6, in7);
diff --git a/vpx_dsp/mips/fwd_txfm_msa.h b/vpx_dsp/mips/fwd_txfm_msa.h
index d1e160e..d7bb316 100644
--- a/vpx_dsp/mips/fwd_txfm_msa.h
+++ b/vpx_dsp/mips/fwd_txfm_msa.h
@@ -29,7 +29,7 @@
   HADD_SW_S32(vec_w_m);                                               \
 })
 
-#define VP9_FDCT4(in0, in1, in2, in3, out0, out1, out2, out3) {     \
+#define VPX_FDCT4(in0, in1, in2, in3, out0, out1, out2, out3) {     \
   v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m;                         \
   v8i16 vec0_m, vec1_m, vec2_m, vec3_m;                             \
   v4i32 vec4_m, vec5_m, vec6_m, vec7_m;                             \
@@ -67,7 +67,7 @@
              in4, in5, in6, in7);                                        \
 }
 
-#define VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,            \
+#define VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,            \
                   out0, out1, out2, out3, out4, out5, out6, out7) {  \
   v8i16 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m;                    \
   v8i16 s7_m, x0_m, x1_m, x2_m, x3_m;                                \
diff --git a/vpx_dsp/mips/idct16x16_msa.c b/vpx_dsp/mips/idct16x16_msa.c
index 5faac71..c74e264 100644
--- a/vpx_dsp/mips/idct16x16_msa.c
+++ b/vpx_dsp/mips/idct16x16_msa.c
@@ -189,16 +189,16 @@
   reg3 = tmp7;
 
   SRARI_H4_SH(reg0, reg2, reg4, reg6, 6);
-  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg0, reg2, reg4, reg6);
+  VPX_ADDBLK_ST8x4_UB(dst, dst_stride, reg0, reg2, reg4, reg6);
   dst += (4 * dst_stride);
   SRARI_H4_SH(reg8, reg10, reg12, reg14, 6);
-  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg8, reg10, reg12, reg14);
+  VPX_ADDBLK_ST8x4_UB(dst, dst_stride, reg8, reg10, reg12, reg14);
   dst += (4 * dst_stride);
   SRARI_H4_SH(reg3, reg13, reg11, reg5, 6);
-  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg3, reg13, reg11, reg5);
+  VPX_ADDBLK_ST8x4_UB(dst, dst_stride, reg3, reg13, reg11, reg5);
   dst += (4 * dst_stride);
   SRARI_H4_SH(reg7, reg9, reg1, reg15, 6);
-  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg7, reg9, reg1, reg15);
+  VPX_ADDBLK_ST8x4_UB(dst, dst_stride, reg7, reg9, reg1, reg15);
 }
 
 void vpx_idct16x16_256_add_msa(const int16_t *input, uint8_t *dst,
@@ -303,7 +303,7 @@
                      l8, l9, l10, l11, l12, l13, l14, l15);
 
   /* ADST in horizontal */
-  VP9_IADST8x16_1D(l0, l1, l2, l3, l4, l5, l6, l7,
+  VPX_IADST8x16_1D(l0, l1, l2, l3, l4, l5, l6, l7,
                    l8, l9, l10, l11, l12, l13, l14, l15,
                    r0, r1, r2, r3, r4, r5, r6, r7,
                    r8, r9, r10, r11, r12, r13, r14, r15);
@@ -345,20 +345,20 @@
   r15 = LD_SH(input + 15 * 16);
 
   /* stage 1 */
-  k0 = VP9_SET_COSPI_PAIR(cospi_1_64, cospi_31_64);
-  k1 = VP9_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64);
-  k2 = VP9_SET_COSPI_PAIR(cospi_17_64, cospi_15_64);
-  k3 = VP9_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64);
+  k0 = VPX_SET_COSPI_PAIR(cospi_1_64, cospi_31_64);
+  k1 = VPX_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64);
+  k2 = VPX_SET_COSPI_PAIR(cospi_17_64, cospi_15_64);
+  k3 = VPX_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64);
   MADD_BF(r15, r0, r7, r8, k0, k1, k2, k3, g0, g1, g2, g3);
-  k0 = VP9_SET_COSPI_PAIR(cospi_9_64, cospi_23_64);
-  k1 = VP9_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64);
-  k2 = VP9_SET_COSPI_PAIR(cospi_25_64, cospi_7_64);
-  k3 = VP9_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64);
+  k0 = VPX_SET_COSPI_PAIR(cospi_9_64, cospi_23_64);
+  k1 = VPX_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64);
+  k2 = VPX_SET_COSPI_PAIR(cospi_25_64, cospi_7_64);
+  k3 = VPX_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64);
   MADD_BF(r11, r4, r3, r12, k0, k1, k2, k3, g8, g9, g10, g11);
   BUTTERFLY_4(g0, g2, g10, g8, h8, h9, v2, v0);
-  k0 = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
-  k1 = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
-  k2 = VP9_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64);
+  k0 = VPX_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
+  k1 = VPX_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
+  k2 = VPX_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64);
   MADD_BF(g1, g3, g9, g11, k0, k1, k2, k0, h0, h1, h2, h3);
 
   r1 = LD_SH(input + 1 * 16);
@@ -370,15 +370,15 @@
   r13 = LD_SH(input + 13 * 16);
   r14 = LD_SH(input + 14 * 16);
 
-  k0 = VP9_SET_COSPI_PAIR(cospi_5_64, cospi_27_64);
-  k1 = VP9_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64);
-  k2 = VP9_SET_COSPI_PAIR(cospi_21_64, cospi_11_64);
-  k3 = VP9_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64);
+  k0 = VPX_SET_COSPI_PAIR(cospi_5_64, cospi_27_64);
+  k1 = VPX_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64);
+  k2 = VPX_SET_COSPI_PAIR(cospi_21_64, cospi_11_64);
+  k3 = VPX_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64);
   MADD_BF(r13, r2, r5, r10, k0, k1, k2, k3, g4, g5, g6, g7);
-  k0 = VP9_SET_COSPI_PAIR(cospi_13_64, cospi_19_64);
-  k1 = VP9_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64);
-  k2 = VP9_SET_COSPI_PAIR(cospi_29_64, cospi_3_64);
-  k3 = VP9_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64);
+  k0 = VPX_SET_COSPI_PAIR(cospi_13_64, cospi_19_64);
+  k1 = VPX_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64);
+  k2 = VPX_SET_COSPI_PAIR(cospi_29_64, cospi_3_64);
+  k3 = VPX_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64);
   MADD_BF(r9, r6, r1, r14, k0, k1, k2, k3, g12, g13, g14, g15);
   BUTTERFLY_4(g4, g6, g14, g12, h10, h11, v6, v4);
   BUTTERFLY_4(h8, h9, h11, h10, out0, out1, h11, h10);
@@ -393,9 +393,9 @@
   ST8x1_UB(res0, dst);
   ST8x1_UB(res1, dst + 15 * dst_stride);
 
-  k0 = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
-  k1 = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
-  k2 = VP9_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64);
+  k0 = VPX_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
+  k1 = VPX_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
+  k2 = VPX_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64);
   MADD_BF(g7, g5, g15, g13, k0, k1, k2, k0, h4, h5, h6, h7);
   BUTTERFLY_4(h0, h2, h6, h4, out8, out9, out11, out10);
   out8 = -out8;
@@ -410,9 +410,9 @@
   ST8x1_UB(res8, dst + dst_stride);
   ST8x1_UB(res9, dst + 14 * dst_stride);
 
-  k0 = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
-  k1 = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
-  k2 = VP9_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64);
+  k0 = VPX_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
+  k1 = VPX_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
+  k2 = VPX_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64);
   MADD_BF(v0, v2, v4, v6, k0, k1, k2, k0, out4, out6, out5, out7);
   out4 = -out4;
   SRARI_H2_SH(out4, out5, 6);
@@ -437,8 +437,8 @@
   ST8x1_UB(res12, dst + 2 * dst_stride);
   ST8x1_UB(res13, dst + 13 * dst_stride);
 
-  k0 = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
-  k3 = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
+  k0 = VPX_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
+  k3 = VPX_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
   MADD_SHORT(out6, out7, k0, k3, out6, out7);
   SRARI_H2_SH(out6, out7, 6);
   dst6 = LD_UB(dst + 4 * dst_stride);
@@ -461,8 +461,8 @@
   ST8x1_UB(res10, dst + 6 * dst_stride);
   ST8x1_UB(res11, dst + 9 * dst_stride);
 
-  k1 = VP9_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64);
-  k2 = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
+  k1 = VPX_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64);
+  k2 = VPX_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
   MADD_SHORT(h10, h11, k1, k2, out2, out3);
   SRARI_H2_SH(out2, out3, 6);
   dst2 = LD_UB(dst + 7 * dst_stride);
diff --git a/vpx_dsp/mips/idct32x32_msa.c b/vpx_dsp/mips/idct32x32_msa.c
index d5b3966..de47597 100644
--- a/vpx_dsp/mips/idct32x32_msa.c
+++ b/vpx_dsp/mips/idct32x32_msa.c
@@ -559,11 +559,11 @@
 
   ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6);
   SRARI_H4_SH(m0, m2, m4, m6, 6);
-  VP9_ADDBLK_ST8x4_UB(dst, (4 * dst_stride), m0, m2, m4, m6);
+  VPX_ADDBLK_ST8x4_UB(dst, (4 * dst_stride), m0, m2, m4, m6);
 
   SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m6, m2, m4, m0);
   SRARI_H4_SH(m0, m2, m4, m6, 6);
-  VP9_ADDBLK_ST8x4_UB((dst + 19 * dst_stride), (4 * dst_stride),
+  VPX_ADDBLK_ST8x4_UB((dst + 19 * dst_stride), (4 * dst_stride),
                       m0, m2, m4, m6);
 
   /* Load 8 & Store 8 */
@@ -578,12 +578,12 @@
 
   ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);
   SRARI_H4_SH(m1, m3, m5, m7, 6);
-  VP9_ADDBLK_ST8x4_UB((dst + 2 * dst_stride), (4 * dst_stride),
+  VPX_ADDBLK_ST8x4_UB((dst + 2 * dst_stride), (4 * dst_stride),
                       m1, m3, m5, m7);
 
   SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m7, m3, m5, m1);
   SRARI_H4_SH(m1, m3, m5, m7, 6);
-  VP9_ADDBLK_ST8x4_UB((dst + 17 * dst_stride), (4 * dst_stride),
+  VPX_ADDBLK_ST8x4_UB((dst + 17 * dst_stride), (4 * dst_stride),
                       m1, m3, m5, m7);
 
   /* Load 8 & Store 8 */
@@ -598,12 +598,12 @@
 
   ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);
   SRARI_H4_SH(n0, n2, n4, n6, 6);
-  VP9_ADDBLK_ST8x4_UB((dst + 1 * dst_stride), (4 * dst_stride),
+  VPX_ADDBLK_ST8x4_UB((dst + 1 * dst_stride), (4 * dst_stride),
                       n0, n2, n4, n6);
 
   SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n6, n2, n4, n0);
   SRARI_H4_SH(n0, n2, n4, n6, 6);
-  VP9_ADDBLK_ST8x4_UB((dst + 18 * dst_stride), (4 * dst_stride),
+  VPX_ADDBLK_ST8x4_UB((dst + 18 * dst_stride), (4 * dst_stride),
                       n0, n2, n4, n6);
 
   /* Load 8 & Store 8 */
@@ -618,12 +618,12 @@
 
   ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7);
   SRARI_H4_SH(n1, n3, n5, n7, 6);
-  VP9_ADDBLK_ST8x4_UB((dst + 3 * dst_stride), (4 * dst_stride),
+  VPX_ADDBLK_ST8x4_UB((dst + 3 * dst_stride), (4 * dst_stride),
                       n1, n3, n5, n7);
 
   SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n7, n3, n5, n1);
   SRARI_H4_SH(n1, n3, n5, n7, 6);
-  VP9_ADDBLK_ST8x4_UB((dst + 16 * dst_stride), (4 * dst_stride),
+  VPX_ADDBLK_ST8x4_UB((dst + 16 * dst_stride), (4 * dst_stride),
                       n1, n3, n5, n7);
 }
 
diff --git a/vpx_dsp/mips/idct4x4_msa.c b/vpx_dsp/mips/idct4x4_msa.c
index f289d8e..04064f8 100644
--- a/vpx_dsp/mips/idct4x4_msa.c
+++ b/vpx_dsp/mips/idct4x4_msa.c
@@ -75,10 +75,10 @@
   LD4x4_SH(input, in0, in1, in2, in3);
   /* rows */
   TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
-  VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+  VPX_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
   /* columns */
   TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
-  VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+  VPX_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
   /* rounding (add 2^3, divide by 2^4) */
   SRARI_H4_SH(in0, in1, in2, in3, 4);
   ADDBLK_ST4x4_UB(in0, in1, in2, in3, dst, dst_stride);
diff --git a/vpx_dsp/mips/idct8x8_msa.c b/vpx_dsp/mips/idct8x8_msa.c
index fd667e4..6a24935 100644
--- a/vpx_dsp/mips/idct8x8_msa.c
+++ b/vpx_dsp/mips/idct8x8_msa.c
@@ -21,21 +21,21 @@
   TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                      in0, in1, in2, in3, in4, in5, in6, in7);
   /* 1D idct8x8 */
-  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
+  VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                  in0, in1, in2, in3, in4, in5, in6, in7);
   /* columns transform */
   TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                      in0, in1, in2, in3, in4, in5, in6, in7);
   /* 1D idct8x8 */
-  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
+  VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                  in0, in1, in2, in3, in4, in5, in6, in7);
   /* final rounding (add 2^4, divide by 2^5) and shift */
   SRARI_H4_SH(in0, in1, in2, in3, 5);
   SRARI_H4_SH(in4, in5, in6, in7, 5);
   /* add block and store 8x8 */
-  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
+  VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
   dst += (4 * dst_stride);
-  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
+  VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
 }
 
 void vpx_idct8x8_12_add_msa(const int16_t *input, uint8_t *dst,
@@ -51,10 +51,10 @@
 
   /* stage1 */
   ILVL_H2_SH(in3, in0, in2, in1, s0, s1);
-  k0 = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
-  k1 = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
-  k2 = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
-  k3 = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
+  k0 = VPX_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
+  k1 = VPX_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
+  k2 = VPX_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
+  k3 = VPX_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
   DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3);
   SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, DCT_CONST_BITS);
   PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1);
@@ -63,10 +63,10 @@
 
   /* stage2 */
   ILVR_H2_SH(in3, in1, in2, in0, s1, s0);
-  k0 = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
-  k1 = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
-  k2 = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
-  k3 = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
+  k0 = VPX_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
+  k1 = VPX_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
+  k2 = VPX_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
+  k3 = VPX_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
   DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3);
   SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, DCT_CONST_BITS);
   PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1);
@@ -76,7 +76,7 @@
   /* stage3 */
   s0 = __msa_ilvr_h(s6, s5);
 
-  k1 = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
+  k1 = VPX_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
   DOTP_SH2_SW(s0, s0, k1, k0, tmp0, tmp1);
   SRARI_W2_SW(tmp0, tmp1, DCT_CONST_BITS);
   PCKEV_H2_SH(zero, tmp0, zero, tmp1, s2, s3);
@@ -86,7 +86,7 @@
               in0, in1, in2, in3, in4, in5, in6, in7);
   TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                      in0, in1, in2, in3, in4, in5, in6, in7);
-  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
+  VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                  in0, in1, in2, in3, in4, in5, in6, in7);
 
   /* final rounding (add 2^4, divide by 2^5) and shift */
@@ -94,9 +94,9 @@
   SRARI_H4_SH(in4, in5, in6, in7, 5);
 
   /* add block and store 8x8 */
-  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
+  VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
   dst += (4 * dst_stride);
-  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
+  VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
 }
 
 void vpx_idct8x8_1_add_msa(const int16_t *input, uint8_t *dst,
@@ -110,7 +110,7 @@
   val = ROUND_POWER_OF_TWO(out, 5);
   vec = __msa_fill_h(val);
 
-  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
+  VPX_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
   dst += (4 * dst_stride);
-  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
+  VPX_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
 }
diff --git a/vpx_dsp/mips/inv_txfm_msa.h b/vpx_dsp/mips/inv_txfm_msa.h
index 1458561..8d4a831 100644
--- a/vpx_dsp/mips/inv_txfm_msa.h
+++ b/vpx_dsp/mips/inv_txfm_msa.h
@@ -82,7 +82,7 @@
   out5 = -out5;                                                         \
 }
 
-#define VP9_SET_COSPI_PAIR(c0_h, c1_h) ({  \
+#define VPX_SET_COSPI_PAIR(c0_h, c1_h) ({  \
   v8i16 out0_m, r0_m, r1_m;                \
                                            \
   r0_m = __msa_fill_h(c0_h);               \
@@ -92,7 +92,7 @@
   out0_m;                                  \
 })
 
-#define VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3) {  \
+#define VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3) {  \
   uint8_t *dst_m = (uint8_t *) (dst);                               \
   v16u8 dst0_m, dst1_m, dst2_m, dst3_m;                             \
   v16i8 tmp0_m, tmp1_m;                                             \
@@ -109,18 +109,18 @@
   ST8x4_UB(tmp0_m, tmp1_m, dst_m, dst_stride);                      \
 }
 
-#define VP9_IDCT4x4(in0, in1, in2, in3, out0, out1, out2, out3) {   \
+#define VPX_IDCT4x4(in0, in1, in2, in3, out0, out1, out2, out3) {   \
   v8i16 c0_m, c1_m, c2_m, c3_m;                                     \
   v8i16 step0_m, step1_m;                                           \
   v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                             \
                                                                     \
-  c0_m = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);              \
-  c1_m = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);             \
+  c0_m = VPX_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);              \
+  c1_m = VPX_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);             \
   step0_m = __msa_ilvr_h(in2, in0);                                 \
   DOTP_SH2_SW(step0_m, step0_m, c0_m, c1_m, tmp0_m, tmp1_m);        \
                                                                     \
-  c2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);              \
-  c3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);               \
+  c2_m = VPX_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);              \
+  c3_m = VPX_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);               \
   step1_m = __msa_ilvr_h(in3, in1);                                 \
   DOTP_SH2_SW(step1_m, step1_m, c2_m, c3_m, tmp2_m, tmp3_m);        \
   SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS);      \
@@ -132,7 +132,7 @@
               out0, out1, out2, out3);                              \
 }
 
-#define VP9_IADST4x4(in0, in1, in2, in3, out0, out1, out2, out3) {  \
+#define VPX_IADST4x4(in0, in1, in2, in3, out0, out1, out2, out3) {  \
   v8i16 res0_m, res1_m, c0_m, c1_m;                                 \
   v8i16 k1_m, k2_m, k3_m, k4_m;                                     \
   v8i16 zero_m = { 0 };                                             \
@@ -210,7 +210,7 @@
 }
 
 /* idct 8x8 macro */
-#define VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,               \
+#define VPX_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,               \
                        out0, out1, out2, out3, out4, out5, out6, out7) {     \
   v8i16 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m;              \
   v8i16 k0_m, k1_m, k2_m, k3_m, res0_m, res1_m, res2_m, res3_m;              \
@@ -234,8 +234,8 @@
   tp4_m = in1 + in3;                                                         \
   PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, tp5_m, tp6_m);                 \
   tp7_m = in7 + in5;                                                         \
-  k2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);                       \
-  k3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);                        \
+  k2_m = VPX_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);                       \
+  k3_m = VPX_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);                        \
   VP9_MADD(in0, in4, in2, in6, k1_m, k0_m, k2_m, k3_m,                       \
            in0, in4, in2, in6);                                              \
   BUTTERFLY_4(in0, in4, in2, in6, tp0_m, tp1_m, tp2_m, tp3_m);               \
@@ -328,7 +328,7 @@
   out7 = -in7;                                                             \
 }
 
-#define VP9_IADST8x16_1D(r0, r1, r2, r3, r4, r5, r6, r7, r8,        \
+#define VPX_IADST8x16_1D(r0, r1, r2, r3, r4, r5, r6, r7, r8,        \
                          r9, r10, r11, r12, r13, r14, r15,          \
                          out0, out1, out2, out3, out4, out5,        \
                          out6, out7, out8, out9, out10, out11,      \
@@ -340,40 +340,40 @@
   v8i16 k0_m, k1_m, k2_m, k3_m;                                     \
                                                                     \
   /* stage 1 */                                                     \
-  k0_m = VP9_SET_COSPI_PAIR(cospi_1_64, cospi_31_64);               \
-  k1_m = VP9_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64);              \
-  k2_m = VP9_SET_COSPI_PAIR(cospi_17_64, cospi_15_64);              \
-  k3_m = VP9_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64);             \
+  k0_m = VPX_SET_COSPI_PAIR(cospi_1_64, cospi_31_64);               \
+  k1_m = VPX_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64);              \
+  k2_m = VPX_SET_COSPI_PAIR(cospi_17_64, cospi_15_64);              \
+  k3_m = VPX_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64);             \
   MADD_BF(r15, r0, r7, r8, k0_m, k1_m, k2_m, k3_m,                  \
           g0_m, g1_m, g2_m, g3_m);                                  \
-  k0_m = VP9_SET_COSPI_PAIR(cospi_5_64, cospi_27_64);               \
-  k1_m = VP9_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64);              \
-  k2_m = VP9_SET_COSPI_PAIR(cospi_21_64, cospi_11_64);              \
-  k3_m = VP9_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64);             \
+  k0_m = VPX_SET_COSPI_PAIR(cospi_5_64, cospi_27_64);               \
+  k1_m = VPX_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64);              \
+  k2_m = VPX_SET_COSPI_PAIR(cospi_21_64, cospi_11_64);              \
+  k3_m = VPX_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64);             \
   MADD_BF(r13, r2, r5, r10, k0_m, k1_m, k2_m, k3_m,                 \
           g4_m, g5_m, g6_m, g7_m);                                  \
-  k0_m = VP9_SET_COSPI_PAIR(cospi_9_64, cospi_23_64);               \
-  k1_m = VP9_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64);              \
-  k2_m = VP9_SET_COSPI_PAIR(cospi_25_64, cospi_7_64);               \
-  k3_m = VP9_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64);              \
+  k0_m = VPX_SET_COSPI_PAIR(cospi_9_64, cospi_23_64);               \
+  k1_m = VPX_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64);              \
+  k2_m = VPX_SET_COSPI_PAIR(cospi_25_64, cospi_7_64);               \
+  k3_m = VPX_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64);              \
   MADD_BF(r11, r4, r3, r12, k0_m, k1_m, k2_m, k3_m,                 \
           g8_m, g9_m, g10_m, g11_m);                                \
-  k0_m = VP9_SET_COSPI_PAIR(cospi_13_64, cospi_19_64);              \
-  k1_m = VP9_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64);             \
-  k2_m = VP9_SET_COSPI_PAIR(cospi_29_64, cospi_3_64);               \
-  k3_m = VP9_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64);              \
+  k0_m = VPX_SET_COSPI_PAIR(cospi_13_64, cospi_19_64);              \
+  k1_m = VPX_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64);             \
+  k2_m = VPX_SET_COSPI_PAIR(cospi_29_64, cospi_3_64);               \
+  k3_m = VPX_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64);              \
   MADD_BF(r9, r6, r1, r14, k0_m, k1_m, k2_m, k3_m,                  \
           g12_m, g13_m, g14_m, g15_m);                              \
                                                                     \
   /* stage 2 */                                                     \
-  k0_m = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);               \
-  k1_m = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);              \
-  k2_m = VP9_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64);              \
+  k0_m = VPX_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);               \
+  k1_m = VPX_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);              \
+  k2_m = VPX_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64);              \
   MADD_BF(g1_m, g3_m, g9_m, g11_m, k0_m, k1_m, k2_m, k0_m,          \
           h0_m, h1_m, h2_m, h3_m);                                  \
-  k0_m = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);              \
-  k1_m = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);             \
-  k2_m = VP9_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64);             \
+  k0_m = VPX_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);              \
+  k1_m = VPX_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);             \
+  k2_m = VPX_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64);             \
   MADD_BF(g7_m, g5_m, g15_m, g13_m, k0_m, k1_m, k2_m, k0_m,         \
           h4_m, h5_m, h6_m, h7_m);                                  \
   BUTTERFLY_4(h0_m, h2_m, h6_m, h4_m, out8, out9, out11, out10);    \
@@ -382,19 +382,19 @@
                                                                     \
   /* stage 3 */                                                     \
   BUTTERFLY_4(h8_m, h9_m, h11_m, h10_m, out0, out1, h11_m, h10_m);  \
-  k0_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);               \
-  k1_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);              \
-  k2_m = VP9_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64);              \
+  k0_m = VPX_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);               \
+  k1_m = VPX_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);              \
+  k2_m = VPX_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64);              \
   MADD_BF(h0_m, h2_m, h4_m, h6_m, k0_m, k1_m, k2_m, k0_m,           \
           out4, out6, out5, out7);                                  \
   MADD_BF(h1_m, h3_m, h5_m, h7_m, k0_m, k1_m, k2_m, k0_m,           \
           out12, out14, out13, out15);                              \
                                                                     \
   /* stage 4 */                                                     \
-  k0_m = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);              \
-  k1_m = VP9_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64);            \
-  k2_m = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);             \
-  k3_m = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);             \
+  k0_m = VPX_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);              \
+  k1_m = VPX_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64);            \
+  k2_m = VPX_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);             \
+  k3_m = VPX_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);             \
   MADD_SHORT(h10_m, h11_m, k1_m, k2_m, out2, out3);                 \
   MADD_SHORT(out6, out7, k0_m, k3_m, out6, out7);                   \
   MADD_SHORT(out10, out11, k0_m, k3_m, out10, out11);               \
diff --git a/vpx_dsp/mips/loopfilter_16_msa.c b/vpx_dsp/mips/loopfilter_16_msa.c
index b7c9f7b..09f132d 100644
--- a/vpx_dsp/mips/loopfilter_16_msa.c
+++ b/vpx_dsp/mips/loopfilter_16_msa.c
@@ -35,8 +35,8 @@
   /* mask and hev */
   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
                hev, mask, flat);
-  VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
-  VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+  VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+  VPX_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
 
   if (__msa_test_bz_v(flat)) {
     ST_UB4(p1_out, p0_out, q0_out, q1_out, (src - 2 * pitch), pitch);
@@ -46,12 +46,12 @@
     ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
                zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r,
                q2_r, q3_r);
-    VP9_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
+    VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
                 p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
 
     ILVL_B4_UH(zero, p3, zero, p2, zero, p1, zero, p0, p3_l, p2_l, p1_l, p0_l);
     ILVL_B4_UH(zero, q0, zero, q1, zero, q2, zero, q3, q0_l, q1_l, q2_l, q3_l);
-    VP9_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
+    VPX_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
                 p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
 
     /* convert 16 bit output data into 8 bit */
@@ -94,7 +94,7 @@
 
   LD_UB8((src - 8 * pitch), pitch, p7, p6, p5, p4, p3, p2, p1, p0);
   LD_UB8(src, pitch, q0, q1, q2, q3, q4, q5, q6, q7);
-  VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
+  VPX_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
 
   if (__msa_test_bz_v(flat2)) {
     LD_UB4(filter48, 16, p2, p1, p0, q0);
@@ -451,8 +451,8 @@
 
     LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
                  hev, mask, flat);
-    VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
-    VP9_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out,
+    VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+    VPX_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out,
                        q1_out);
 
     flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);
@@ -468,7 +468,7 @@
       ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
                  zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r,
                  q3_r);
-      VP9_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filter8,
+      VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filter8,
                   p1_filter8, p0_filter8, q0_filter8, q1_filter8, q2_filter8);
 
       /* convert 16 bit output data into 8 bit */
@@ -489,7 +489,7 @@
       LD_UB4((src - 8 * pitch), pitch, p7, p6, p5, p4);
       LD_UB4(src + (4 * pitch), pitch, q4, q5, q6, q7);
 
-      VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
+      VPX_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
 
       if (__msa_test_bz_v(flat2)) {
         p2_d = __msa_copy_u_d((v2i64)p2_out, 0);
@@ -768,9 +768,9 @@
   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
                hev, mask, flat);
   /* flat4 */
-  VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+  VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
   /* filter4 */
-  VP9_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+  VPX_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
 
   flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);
 
@@ -783,7 +783,7 @@
     ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
                zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r,
                q3_r);
-    VP9_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
+    VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
                 p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
 
     /* convert 16 bit output data into 8 bit */
@@ -827,7 +827,7 @@
   LD_UB8((src - 8 * 16), 16, p7, p6, p5, p4, p3, p2, p1, p0);
   LD_UB8(src, 16, q0, q1, q2, q3, q4, q5, q6, q7);
 
-  VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
+  VPX_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
 
   if (__msa_test_bz_v(flat2)) {
     v8i16 vec0, vec1, vec2, vec3, vec4;
@@ -1082,9 +1082,9 @@
   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
                hev, mask, flat);
   /* flat4 */
-  VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+  VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
   /* filter4 */
-  VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+  VPX_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
 
   if (__msa_test_bz_v(flat)) {
     ILVR_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1);
@@ -1102,11 +1102,11 @@
     ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
                zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r,
                q3_r);
-    VP9_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
+    VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
                 p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
     ILVL_B4_UH(zero, p3, zero, p2, zero, p1, zero, p0, p3_l, p2_l, p1_l, p0_l);
     ILVL_B4_UH(zero, q0, zero, q1, zero, q2, zero, q3, q0_l, q1_l, q2_l, q3_l);
-    VP9_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
+    VPX_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
                 p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
 
     /* convert 16 bit output data into 8 bit */
@@ -1151,7 +1151,7 @@
   LD_UB8((src - 8 * 16), 16, p7, p6, p5, p4, p3, p2, p1, p0);
   LD_UB8(src, 16, q0, q1, q2, q3, q4, q5, q6, q7);
 
-  VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
+  VPX_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
 
   if (__msa_test_bz_v(flat2)) {
     v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
diff --git a/vpx_dsp/mips/loopfilter_4_msa.c b/vpx_dsp/mips/loopfilter_4_msa.c
index daf5f38..303276a 100644
--- a/vpx_dsp/mips/loopfilter_4_msa.c
+++ b/vpx_dsp/mips/loopfilter_4_msa.c
@@ -30,7 +30,7 @@
 
   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
                hev, mask, flat);
-  VP9_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+  VPX_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
 
   p1_d = __msa_copy_u_d((v2i64)p1_out, 0);
   p0_d = __msa_copy_u_d((v2i64)p0_out, 0);
@@ -66,7 +66,7 @@
 
   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0,
                hev, mask, flat);
-  VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
+  VPX_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
 
   ST_UB4(p1, p0, q0, q1, (src - 2 * pitch), pitch);
 }
@@ -92,7 +92,7 @@
                      p3, p2, p1, p0, q0, q1, q2, q3);
   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
                hev, mask, flat);
-  VP9_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
+  VPX_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
   ILVR_B2_SH(p0, p1, q1, q0, vec0, vec1);
   ILVRL_H2_SH(vec1, vec0, vec2, vec3);
 
@@ -138,7 +138,7 @@
 
   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0,
                hev, mask, flat);
-  VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
+  VPX_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
   ILVR_B2_SH(p0, p1, q1, q0, tmp0, tmp1);
   ILVRL_H2_SH(tmp1, tmp0, tmp2, tmp3);
   ILVL_B2_SH(p0, p1, q1, q0, tmp0, tmp1);
diff --git a/vpx_dsp/mips/loopfilter_8_msa.c b/vpx_dsp/mips/loopfilter_8_msa.c
index 00b6db5..2bfcd4d 100644
--- a/vpx_dsp/mips/loopfilter_8_msa.c
+++ b/vpx_dsp/mips/loopfilter_8_msa.c
@@ -34,8 +34,8 @@
 
   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
                hev, mask, flat);
-  VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
-  VP9_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+  VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+  VPX_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
 
   flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);
 
@@ -49,7 +49,7 @@
     ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
                zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r,
                q2_r, q3_r);
-    VP9_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filter8,
+    VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filter8,
                 p1_filter8, p0_filter8, q0_filter8, q1_filter8, q2_filter8);
 
     /* convert 16 bit output data into 8 bit */
@@ -117,8 +117,8 @@
   /* mask and hev */
   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
                hev, mask, flat);
-  VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
-  VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+  VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+  VPX_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
 
   if (__msa_test_bz_v(flat)) {
     ST_UB4(p1_out, p0_out, q0_out, q1_out, (src - 2 * pitch), pitch);
@@ -126,12 +126,12 @@
     ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
                zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r,
                q2_r, q3_r);
-    VP9_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
+    VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
                 p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
 
     ILVL_B4_UH(zero, p3, zero, p2, zero, p1, zero, p0, p3_l, p2_l, p1_l, p0_l);
     ILVL_B4_UH(zero, q0, zero, q1, zero, q2, zero, q3, q0_l, q1_l, q2_l, q3_l);
-    VP9_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
+    VPX_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
                 p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
 
     /* convert 16 bit output data into 8 bit */
@@ -187,9 +187,9 @@
   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
                hev, mask, flat);
   /* flat4 */
-  VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+  VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
   /* filter4 */
-  VP9_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+  VPX_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
 
   flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);
 
@@ -206,7 +206,7 @@
     ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
                zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r,
                q3_r);
-    VP9_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
+    VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
                 p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
     /* convert 16 bit output data into 8 bit */
     PCKEV_B4_SH(p2_filt8_r, p2_filt8_r, p1_filt8_r, p1_filt8_r, p0_filt8_r,
@@ -283,9 +283,9 @@
   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
                hev, mask, flat);
   /* flat4 */
-  VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+  VPX_FLAT4(p3, p2, p0, q0, q2, q3, flat);
   /* filter4 */
-  VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
+  VPX_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);
 
   if (__msa_test_bz_v(flat)) {
     ILVR_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1);
@@ -301,14 +301,14 @@
     ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1,
                zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r,
                q3_r);
-    VP9_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
+    VPX_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r,
                 p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r);
 
     ILVL_B4_UH(zero, p3, zero, p2, zero, p1, zero, p0, p3_l, p2_l, p1_l, p0_l);
     ILVL_B4_UH(zero, q0, zero, q1, zero, q2, zero, q3, q0_l, q1_l, q2_l, q3_l);
 
     /* filter8 */
-    VP9_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
+    VPX_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
                 p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
 
     /* convert 16 bit output data into 8 bit */
diff --git a/vpx_dsp/mips/loopfilter_filters_dspr2.h b/vpx_dsp/mips/loopfilter_filters_dspr2.h
index 4a1506b..db39854 100644
--- a/vpx_dsp/mips/loopfilter_filters_dspr2.h
+++ b/vpx_dsp/mips/loopfilter_filters_dspr2.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP9_COMMON_MIPS_DSPR2_VP9_LOOPFILTER_FILTERS_DSPR2_H_
-#define VP9_COMMON_MIPS_DSPR2_VP9_LOOPFILTER_FILTERS_DSPR2_H_
+#ifndef VPX_DSP_MIPS_LOOPFILTER_FILTERS_DSPR2_H_
+#define VPX_DSP_MIPS_LOOPFILTER_FILTERS_DSPR2_H_
 
 #include <stdlib.h>
 
@@ -761,4 +761,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP9_COMMON_MIPS_DSPR2_VP9_LOOPFILTER_FILTERS_DSPR2_H_
+#endif  // VPX_DSP_MIPS_LOOPFILTER_FILTERS_DSPR2_H_
diff --git a/vpx_dsp/mips/loopfilter_macros_dspr2.h b/vpx_dsp/mips/loopfilter_macros_dspr2.h
index 994ff18..a990b40 100644
--- a/vpx_dsp/mips/loopfilter_macros_dspr2.h
+++ b/vpx_dsp/mips/loopfilter_macros_dspr2.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP9_COMMON_MIPS_DSPR2_VP9_LOOPFILTER_MACROS_DSPR2_H_
-#define VP9_COMMON_MIPS_DSPR2_VP9_LOOPFILTER_MACROS_DSPR2_H_
+#ifndef VPX_DSP_MIPS_LOOPFILTER_MACROS_DSPR2_H_
+#define VPX_DSP_MIPS_LOOPFILTER_MACROS_DSPR2_H_
 
 #include <stdlib.h>
 
@@ -475,4 +475,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP9_COMMON_MIPS_DSPR2_VP9_LOOPFILTER_MACROS_DSPR2_H_
+#endif  // VPX_DSP_MIPS_LOOPFILTER_MACROS_DSPR2_H_
diff --git a/vpx_dsp/mips/loopfilter_masks_dspr2.h b/vpx_dsp/mips/loopfilter_masks_dspr2.h
index 2c964af..9bf2927 100644
--- a/vpx_dsp/mips/loopfilter_masks_dspr2.h
+++ b/vpx_dsp/mips/loopfilter_masks_dspr2.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VP9_COMMON_MIPS_DSPR2_VP9_LOOPFILTER_MASKS_DSPR2_H_
-#define VP9_COMMON_MIPS_DSPR2_VP9_LOOPFILTER_MASKS_DSPR2_H_
+#ifndef VPX_DSP_MIPS_LOOPFILTER_MASKS_DSPR2_H_
+#define VPX_DSP_MIPS_LOOPFILTER_MASKS_DSPR2_H_
 
 #include <stdlib.h>
 
@@ -370,4 +370,4 @@
 }  // extern "C"
 #endif
 
-#endif  // VP9_COMMON_MIPS_DSPR2_VP9_LOOPFILTER_MASKS_DSPR2_H_
+#endif  // VPX_DSP_MIPS_LOOPFILTER_MASKS_DSPR2_H_
diff --git a/vpx_dsp/mips/loopfilter_msa.h b/vpx_dsp/mips/loopfilter_msa.h
index 62b1706..9894701 100644
--- a/vpx_dsp/mips/loopfilter_msa.h
+++ b/vpx_dsp/mips/loopfilter_msa.h
@@ -13,7 +13,7 @@
 
 #include "vpx_dsp/mips/macros_msa.h"
 
-#define VP9_LPF_FILTER4_8W(p1_in, p0_in, q0_in, q1_in, mask_in, hev_in,  \
+#define VPX_LPF_FILTER4_8W(p1_in, p0_in, q0_in, q1_in, mask_in, hev_in,  \
                            p1_out, p0_out, q0_out, q1_out) {             \
   v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign;                    \
   v16i8 filt, filt1, filt2, cnst4b, cnst3b;                              \
@@ -63,7 +63,7 @@
   p1_out = __msa_xori_b((v16u8)p1_m, 0x80);                              \
 }
 
-#define VP9_LPF_FILTER4_4W(p1_in, p0_in, q0_in, q1_in, mask_in, hev_in,  \
+#define VPX_LPF_FILTER4_4W(p1_in, p0_in, q0_in, q1_in, mask_in, hev_in,  \
                            p1_out, p0_out, q0_out, q1_out) {             \
   v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign;                    \
   v16i8 filt, filt1, filt2, cnst4b, cnst3b;                              \
@@ -120,7 +120,7 @@
   p1_out = __msa_xori_b((v16u8)p1_m, 0x80);                              \
 }
 
-#define VP9_FLAT4(p3_in, p2_in, p0_in, q0_in, q2_in, q3_in, flat_out) {  \
+#define VPX_FLAT4(p3_in, p2_in, p0_in, q0_in, q2_in, q3_in, flat_out) {  \
   v16u8 tmp, p2_a_sub_p0, q2_a_sub_q0, p3_a_sub_p0, q3_a_sub_q0;         \
   v16u8 zero_in = { 0 };                                                 \
                                                                          \
@@ -140,7 +140,7 @@
   flat_out = flat_out & (mask);                                          \
 }
 
-#define VP9_FLAT5(p7_in, p6_in, p5_in, p4_in, p0_in, q0_in, q4_in,  \
+#define VPX_FLAT5(p7_in, p6_in, p5_in, p4_in, p0_in, q0_in, q4_in,  \
                   q5_in, q6_in, q7_in, flat_in, flat2_out) {        \
   v16u8 tmp, zero_in = { 0 };                                       \
   v16u8 p4_a_sub_p0, q4_a_sub_q0, p5_a_sub_p0, q5_a_sub_q0;         \
@@ -169,7 +169,7 @@
   flat2_out = flat2_out & flat_in;                                  \
 }
 
-#define VP9_FILTER8(p3_in, p2_in, p1_in, p0_in,                  \
+#define VPX_FILTER8(p3_in, p2_in, p1_in, p0_in,                  \
                     q0_in, q1_in, q2_in, q3_in,                  \
                     p2_filt8_out, p1_filt8_out, p0_filt8_out,    \
                     q0_filt8_out, q1_filt8_out, q2_filt8_out) {  \
diff --git a/vpx_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c b/vpx_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c
index 6fd5208..deec079 100644
--- a/vpx_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c
+++ b/vpx_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c
@@ -901,7 +901,7 @@
                    filters_y, y0_q4, y_step_q4, w, h);
 }
 
-// void vp9_convolve8_ssse3(const uint8_t *src, ptrdiff_t src_stride,
+// void vpx_convolve8_ssse3(const uint8_t *src, ptrdiff_t src_stride,
 //                          uint8_t *dst, ptrdiff_t dst_stride,
 //                          const int16_t *filter_x, int x_step_q4,
 //                          const int16_t *filter_y, int y_step_q4,
diff --git a/vpx_scale/generic/gen_scalers.c b/vpx_scale/generic/gen_scalers.c
index dab324e..e0e4591 100644
--- a/vpx_scale/generic/gen_scalers.c
+++ b/vpx_scale/generic/gen_scalers.c
@@ -33,7 +33,7 @@
  *  SPECIAL NOTES : None.
  *
  ****************************************************************************/
-void vp8_horizontal_line_5_4_scale_c(const unsigned char *source,
+void vpx_horizontal_line_5_4_scale_c(const unsigned char *source,
                                      unsigned int source_width,
                                      unsigned char *dest,
                                      unsigned int dest_width) {
@@ -64,7 +64,7 @@
 
 
 
-void vp8_vertical_band_5_4_scale_c(unsigned char *source,
+void vpx_vertical_band_5_4_scale_c(unsigned char *source,
                                    unsigned int src_pitch,
                                    unsigned char *dest,
                                    unsigned int dest_pitch,
@@ -96,7 +96,7 @@
 
 /*7***************************************************************************
  *
- *  ROUTINE       : vp8_horizontal_line_3_5_scale_c
+ *  ROUTINE       : vpx_horizontal_line_3_5_scale_c
  *
  *  INPUTS        : const unsigned char *source : Pointer to source data.
  *                  unsigned int source_width    : Stride of source.
@@ -114,7 +114,7 @@
  *
  *
  ****************************************************************************/
-void vp8_horizontal_line_5_3_scale_c(const unsigned char *source,
+void vpx_horizontal_line_5_3_scale_c(const unsigned char *source,
                                      unsigned int source_width,
                                      unsigned char *dest,
                                      unsigned int dest_width) {
@@ -142,7 +142,7 @@
 
 }
 
-void vp8_vertical_band_5_3_scale_c(unsigned char *source,
+void vpx_vertical_band_5_3_scale_c(unsigned char *source,
                                    unsigned int src_pitch,
                                    unsigned char *dest,
                                    unsigned int dest_pitch,
@@ -172,7 +172,7 @@
 
 /****************************************************************************
  *
- *  ROUTINE       : vp8_horizontal_line_1_2_scale_c
+ *  ROUTINE       : vpx_horizontal_line_1_2_scale_c
  *
  *  INPUTS        : const unsigned char *source : Pointer to source data.
  *                  unsigned int source_width    : Stride of source.
@@ -189,7 +189,7 @@
  *  SPECIAL NOTES : None.
  *
  ****************************************************************************/
-void vp8_horizontal_line_2_1_scale_c(const unsigned char *source,
+void vpx_horizontal_line_2_1_scale_c(const unsigned char *source,
                                      unsigned int source_width,
                                      unsigned char *dest,
                                      unsigned int dest_width) {
@@ -208,7 +208,7 @@
   }
 }
 
-void vp8_vertical_band_2_1_scale_c(unsigned char *source,
+void vpx_vertical_band_2_1_scale_c(unsigned char *source,
                                    unsigned int src_pitch,
                                    unsigned char *dest,
                                    unsigned int dest_pitch,
@@ -218,7 +218,7 @@
   memcpy(dest, source, dest_width);
 }
 
-void vp8_vertical_band_2_1_scale_i_c(unsigned char *source,
+void vpx_vertical_band_2_1_scale_i_c(unsigned char *source,
                                      unsigned int src_pitch,
                                      unsigned char *dest,
                                      unsigned int dest_pitch,
diff --git a/vpx_scale/generic/vpx_scale.c b/vpx_scale/generic/vpx_scale.c
index 15e4ba8..aaae4c7 100644
--- a/vpx_scale/generic/vpx_scale.c
+++ b/vpx_scale/generic/vpx_scale.c
@@ -289,15 +289,15 @@
   switch (hratio * 10 / hscale) {
     case 8:
       /* 4-5 Scale in Width direction */
-      horiz_line_scale = vp8_horizontal_line_5_4_scale;
+      horiz_line_scale = vpx_horizontal_line_5_4_scale;
       break;
     case 6:
       /* 3-5 Scale in Width direction */
-      horiz_line_scale = vp8_horizontal_line_5_3_scale;
+      horiz_line_scale = vpx_horizontal_line_5_3_scale;
       break;
     case 5:
       /* 1-2 Scale in Width direction */
-      horiz_line_scale = vp8_horizontal_line_2_1_scale;
+      horiz_line_scale = vpx_horizontal_line_2_1_scale;
       break;
     default:
       /* The ratio is not acceptable now */
@@ -309,13 +309,13 @@
   switch (vratio * 10 / vscale) {
     case 8:
       /* 4-5 Scale in vertical direction */
-      vert_band_scale     = vp8_vertical_band_5_4_scale;
+      vert_band_scale     = vpx_vertical_band_5_4_scale;
       source_band_height  = 5;
       dest_band_height    = 4;
       break;
     case 6:
       /* 3-5 Scale in vertical direction */
-      vert_band_scale     = vp8_vertical_band_5_3_scale;
+      vert_band_scale     = vpx_vertical_band_5_3_scale;
       source_band_height  = 5;
       dest_band_height    = 3;
       break;
@@ -324,12 +324,12 @@
 
       if (interlaced) {
         /* if the content is interlaced, point sampling is used */
-        vert_band_scale     = vp8_vertical_band_2_1_scale;
+        vert_band_scale     = vpx_vertical_band_2_1_scale;
       } else {
 
         interpolation = 1;
         /* if the content is progressive, interplo */
-        vert_band_scale     = vp8_vertical_band_2_1_scale_i;
+        vert_band_scale     = vpx_vertical_band_2_1_scale_i;
 
       }
 
diff --git a/vpx_scale/generic/yv12config.c b/vpx_scale/generic/yv12config.c
index 815041d..c56ab0e 100644
--- a/vpx_scale/generic/yv12config.c
+++ b/vpx_scale/generic/yv12config.c
@@ -146,7 +146,7 @@
                              vpx_get_frame_buffer_cb_fn_t cb,
                              void *cb_priv) {
   if (ybf) {
-    const int vp9_byte_align = (byte_alignment == 0) ? 1 : byte_alignment;
+    const int vpx_byte_align = (byte_alignment == 0) ? 1 : byte_alignment;
     const int aligned_width = (width + 7) & ~7;
     const int aligned_height = (height + 7) & ~7;
     const int y_stride = ((aligned_width + 2 * border) + 31) & ~31;
@@ -269,13 +269,13 @@
 #endif  // CONFIG_VPX_HIGHBITDEPTH
 
     ybf->y_buffer = (uint8_t *)yv12_align_addr(
-        buf + (border * y_stride) + border, vp9_byte_align);
+        buf + (border * y_stride) + border, vpx_byte_align);
     ybf->u_buffer = (uint8_t *)yv12_align_addr(
         buf + yplane_size + (uv_border_h * uv_stride) + uv_border_w,
-        vp9_byte_align);
+        vpx_byte_align);
     ybf->v_buffer = (uint8_t *)yv12_align_addr(
         buf + yplane_size + uvplane_size + (uv_border_h * uv_stride) +
-        uv_border_w, vp9_byte_align);
+        uv_border_w, vpx_byte_align);
 
 #if CONFIG_ALPHA
     ybf->alpha_width = alpha_width;
@@ -283,7 +283,7 @@
     ybf->alpha_stride = alpha_stride;
     ybf->alpha_buffer = (uint8_t *)yv12_align_addr(
         buf + yplane_size + 2 * uvplane_size +
-        (alpha_border_h * alpha_stride) + alpha_border_w, vp9_byte_align);
+        (alpha_border_h * alpha_stride) + alpha_border_w, vpx_byte_align);
 #endif
     ybf->corrupted = 0; /* assume not corrupted by errors */
     return 0;
diff --git a/vpx_scale/vpx_scale_rtcd.pl b/vpx_scale/vpx_scale_rtcd.pl
index 73f289d..f461572 100644
--- a/vpx_scale/vpx_scale_rtcd.pl
+++ b/vpx_scale/vpx_scale_rtcd.pl
@@ -7,13 +7,13 @@
 
 # Scaler functions
 if (vpx_config("CONFIG_SPATIAL_RESAMPLING") eq "yes") {
-    add_proto qw/void vp8_horizontal_line_5_4_scale/, "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width";
-    add_proto qw/void vp8_vertical_band_5_4_scale/, "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width";
-    add_proto qw/void vp8_horizontal_line_5_3_scale/, "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width";
-    add_proto qw/void vp8_vertical_band_5_3_scale/, "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width";
-    add_proto qw/void vp8_horizontal_line_2_1_scale/, "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width";
-    add_proto qw/void vp8_vertical_band_2_1_scale/, "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width";
-    add_proto qw/void vp8_vertical_band_2_1_scale_i/, "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width";
+    add_proto qw/void vpx_horizontal_line_5_4_scale/, "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width";
+    add_proto qw/void vpx_vertical_band_5_4_scale/, "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width";
+    add_proto qw/void vpx_horizontal_line_5_3_scale/, "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width";
+    add_proto qw/void vpx_vertical_band_5_3_scale/, "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width";
+    add_proto qw/void vpx_horizontal_line_2_1_scale/, "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width";
+    add_proto qw/void vpx_vertical_band_2_1_scale/, "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width";
+    add_proto qw/void vpx_vertical_band_2_1_scale_i/, "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width";
 }
 
 add_proto qw/void vpx_yv12_extend_frame_borders/, "struct yv12_buffer_config *ybf";
diff --git a/vpxenc.c b/vpxenc.c
index 4cce0f3..f3c8217 100644
--- a/vpxenc.c
+++ b/vpxenc.c
@@ -339,8 +339,8 @@
 static const arg_def_t arnr_type = ARG_DEF(
     NULL, "arnr-type", 1, "AltRef type");
 static const struct arg_enum_list tuning_enum[] = {
-  {"psnr", VP8_TUNE_PSNR},
-  {"ssim", VP8_TUNE_SSIM},
+  {"psnr", VPX_TUNE_PSNR},
+  {"ssim", VPX_TUNE_SSIM},
   {NULL, 0}
 };
 static const arg_def_t tune_ssim = ARG_DEF_ENUM(
@@ -412,8 +412,8 @@
 #endif
 
 static const struct arg_enum_list tune_content_enum[] = {
-  {"default", VP9E_CONTENT_DEFAULT},
-  {"screen", VP9E_CONTENT_SCREEN},
+  {"default", VPX_CONTENT_DEFAULT},
+  {"screen", VPX_CONTENT_SCREEN},
   {NULL, 0}
 };