/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Video for Linux Two header file for Exynos
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This header file contains several V4L2 APIs that are to be proposed to
 * the V4L2 community; until they are accepted, they are used exclusively
 * for Exynos.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __LINUX_VIDEODEV2_EXYNOS_MEDIA_H
#define __LINUX_VIDEODEV2_EXYNOS_MEDIA_H

#include <linux/videodev2.h>

/*
 *	C O N T R O L S
 */
/* CID base for Exynos controls (USER_CLASS) */
#define V4L2_CID_EXYNOS_BASE		(V4L2_CTRL_CLASS_USER | 0x2000)

/* cacheable configuration */
#define V4L2_CID_CACHEABLE		(V4L2_CID_EXYNOS_BASE + 10)

/* for color space conversion equation selection */
#define V4L2_CID_CSC_EQ_MODE		(V4L2_CID_EXYNOS_BASE + 100)
#define V4L2_CID_CSC_EQ			(V4L2_CID_EXYNOS_BASE + 101)
#define V4L2_CID_CSC_RANGE		(V4L2_CID_EXYNOS_BASE + 102)

/* for DRM playback scenario */
#define V4L2_CID_CONTENT_PROTECTION	(V4L2_CID_EXYNOS_BASE + 201)
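
/*
 * A minimal userspace sketch of driving one of these controls through the
 * standard control ioctl (VIDIOC_S_CTRL and struct v4l2_control are core
 * V4L2). The value semantics are an assumption here; this header does not
 * define them, so treat "1" as a placeholder for the full-range setting:
 *
 *	struct v4l2_control ctrl = {
 *		.id	= V4L2_CID_CSC_RANGE,
 *		.value	= 1,
 *	};
 *	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
 *		perror("VIDIOC_S_CTRL");
 */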

/*
 *	V I D E O   I M A G E   F O R M A T
 */
/* 1 plane -- one Y, one Cb + Cr interleaved, non-contiguous */
#define V4L2_PIX_FMT_NV12N		v4l2_fourcc('N', 'N', '1', '2')
#define V4L2_PIX_FMT_NV12NT		v4l2_fourcc('T', 'N', '1', '2')

/* 1 plane -- one Y, one Cb, one Cr, non-contiguous */
#define V4L2_PIX_FMT_YUV420N		v4l2_fourcc('Y', 'N', '1', '2')

/* 1 plane -- 8bit Y, 2bit Y, 8bit Cb + Cr interleaved, 2bit Cb + Cr interleaved, non-contiguous */
#define V4L2_PIX_FMT_NV12N_10B		v4l2_fourcc('B', 'N', '1', '2')
#define V4L2_PIX_FMT_NV12M_S10B		v4l2_fourcc('B', 'M', '1', '2')
#define V4L2_PIX_FMT_NV21M_S10B		v4l2_fourcc('B', 'M', '2', '1')
#define V4L2_PIX_FMT_NV16M_S10B		v4l2_fourcc('B', 'M', '1', '6')
#define V4L2_PIX_FMT_NV61M_S10B		v4l2_fourcc('B', 'M', '6', '1')
#define V4L2_PIX_FMT_NV12M_P010		v4l2_fourcc('P', 'M', '1', '2')
#define V4L2_PIX_FMT_NV21M_P010		v4l2_fourcc('P', 'M', '2', '1')
#define V4L2_PIX_FMT_NV16M_P210		v4l2_fourcc('P', 'M', '1', '6')
#define V4L2_PIX_FMT_NV61M_P210		v4l2_fourcc('P', 'M', '6', '1')

#define V4L2_PIX_FMT_NV12N_P010		v4l2_fourcc('N', 'P', '1', '2')
#define V4L2_PIX_FMT_NV12_P010		v4l2_fourcc('P', 'N', '1', '2')

/* 12 Y/CbCr 4:2:0 SBWC */
#define V4L2_PIX_FMT_NV12M_SBWC_8B	v4l2_fourcc('M', '1', 'S', '8')
#define V4L2_PIX_FMT_NV12M_SBWC_10B	v4l2_fourcc('M', '1', 'S', '1')

/* 21 Y/CrCb 4:2:0 SBWC */
#define V4L2_PIX_FMT_NV21M_SBWC_8B	v4l2_fourcc('M', '2', 'S', '8')
#define V4L2_PIX_FMT_NV21M_SBWC_10B	v4l2_fourcc('M', '2', 'S', '1')

/* 12 Y/CbCr 4:2:0 SBWC single */
#define V4L2_PIX_FMT_NV12N_SBWC_8B	v4l2_fourcc('N', '1', 'S', '8')
#define V4L2_PIX_FMT_NV12N_SBWC_10B	v4l2_fourcc('N', '1', 'S', '1')

/* 12 Y/CbCr 4:2:0 SBWC Lossy */
#define V4L2_PIX_FMT_NV12M_SBWCL_8B	v4l2_fourcc('M', '1', 'L', '8')
#define V4L2_PIX_FMT_NV12M_SBWCL_10B	v4l2_fourcc('M', '1', 'L', '1')

/* 12 Y/CbCr 4:2:0 SBWC Lossy single */
#define V4L2_PIX_FMT_NV12N_SBWCL_8B	v4l2_fourcc('N', '1', 'L', '8')
#define V4L2_PIX_FMT_NV12N_SBWCL_10B	v4l2_fourcc('N', '1', 'L', '1')

/* 12 Y/CbCr 4:2:0 AFBC */
#define V4L2_PIX_FMT_NV12M_AFBC_8B	v4l2_fourcc('M', '1', 'A', '8')
#define V4L2_PIX_FMT_NV12M_AFBC_10B	v4l2_fourcc('M', '1', 'A', '1')
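
/*
 * A hedged sketch of negotiating one of these formats through the standard
 * multi-planar V4L2 API; everything below other than the Exynos fourcc is
 * core V4L2, and the 1920x1080 geometry is only an example:
 *
 *	struct v4l2_format fmt = {
 *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
 *	};
 *	fmt.fmt.pix_mp.width = 1920;
 *	fmt.fmt.pix_mp.height = 1080;
 *	fmt.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12M_SBWC_8B;
 *	if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
 *		perror("VIDIOC_S_FMT");
 */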

/* helper macros */
#ifndef __ALIGN_UP
#define __ALIGN_UP(x, a)		(((x) + ((a) - 1)) & ~((a) - 1))
#endif
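
/*
 * __ALIGN_UP rounds x up to the next multiple of a, which must be a power
 * of two; e.g. __ALIGN_UP(1080, 16) == 1088 and __ALIGN_UP(1920, 64) == 1920.
 */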

#define NV12N_STRIDE(w)			(__ALIGN_UP((w), 64))
#define NV12N_Y_SIZE(w, h)		(NV12N_STRIDE(w) * __ALIGN_UP((h), 16))
#define NV12N_CBCR_SIZE(w, h)		(NV12N_STRIDE(w) * __ALIGN_UP((h), 16) / 2)
#define NV12N_CBCR_BASE(base, w, h)		\
	((base) + NV12N_Y_SIZE((w), (h)))
#define NV12N_10B_Y_8B_SIZE(w, h)		\
	(__ALIGN_UP((w), 64) * __ALIGN_UP((h), 16) + 256)
#define NV12N_10B_Y_2B_SIZE(w, h)		\
	((__ALIGN_UP((w) / 4, 16) * __ALIGN_UP((h), 16) + 64))
#define NV12N_10B_CBCR_8B_SIZE(w, h)		\
	(__ALIGN_UP((__ALIGN_UP((w), 64) * (__ALIGN_UP((h), 16) / 2) + 256), 16))
#define NV12N_10B_CBCR_2B_SIZE(w, h)		\
	((__ALIGN_UP((w) / 4, 16) * (__ALIGN_UP((h), 16) / 2) + 64))
#define NV12N_10B_CBCR_BASE(base, w, h)		\
	((base) + NV12N_10B_Y_8B_SIZE((w), (h)) + NV12N_10B_Y_2B_SIZE((w), (h)))
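
/*
 * Worked example (1920x1080 NV12N): NV12N_STRIDE(1920) = 1920 since the
 * width is already 64-aligned, the height rounds up to 1088, so
 * NV12N_Y_SIZE(1920, 1080) = 1920 * 1088 = 2088960 bytes and
 * NV12N_CBCR_SIZE(1920, 1080) = 1044480 bytes; the interleaved CbCr plane
 * therefore starts 2088960 bytes past the luma base address.
 */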

#define YUV420N_Y_SIZE(w, h)			\
	(__ALIGN_UP((w), 16) * __ALIGN_UP((h), 16) + 256)
#define YUV420N_CB_SIZE(w, h)			\
	(__ALIGN_UP((__ALIGN_UP((w) / 2, 16) * (__ALIGN_UP((h), 16) / 2) + 256), 16))
#define YUV420N_CR_SIZE(w, h)			\
	(__ALIGN_UP((__ALIGN_UP((w) / 2, 16) * (__ALIGN_UP((h), 16) / 2) + 256), 16))
#define YUV420N_CB_BASE(base, w, h)	((base) + YUV420N_Y_SIZE((w), (h)))
#define YUV420N_CR_BASE(base, w, h)		\
	(YUV420N_CB_BASE((base), (w), (h)) + YUV420N_CB_SIZE((w), (h)))
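
/*
 * Worked example (1920x1080 YUV420N): the luma plane is
 * 1920 * 1088 + 256 = 2089216 bytes and each chroma plane is
 * __ALIGN_UP(960 * 544 + 256, 16) = 522496 bytes, with Cb placed
 * immediately after Y and Cr immediately after Cb in the single allocation.
 */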

#define NV12M_Y_SIZE(w, h)			\
	(__ALIGN_UP((w), 64) * __ALIGN_UP((h), 16) + 256)
#define NV12M_CBCR_SIZE(w, h)			\
	((__ALIGN_UP((w), 64) * __ALIGN_UP((h), 16) / 2) + 256)
#define NV12M_Y_2B_SIZE(w, h)			\
	(__ALIGN_UP((w) / 4, 16) * __ALIGN_UP((h), 16) + 256)
#define NV12M_CBCR_2B_SIZE(w, h)		\
	((__ALIGN_UP((w) / 4, 16) * __ALIGN_UP((h), 16) / 2) + 256)

#define NV16M_Y_SIZE(w, h)			\
	(__ALIGN_UP((w), 64) * __ALIGN_UP((h), 16) + 256)
#define NV16M_CBCR_SIZE(w, h)			\
	(__ALIGN_UP((w), 64) * __ALIGN_UP((h), 16) + 256)
#define NV16M_Y_2B_SIZE(w, h)			\
	(__ALIGN_UP((w) / 4, 16) * __ALIGN_UP((h), 16) + 256)
#define NV16M_CBCR_2B_SIZE(w, h)		\
	(__ALIGN_UP((w) / 4, 16) * __ALIGN_UP((h), 16) + 256)
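
/*
 * Worked example (1920x1080 NV12M): the luma plane occupies
 * 1920 * 1088 + 256 = 2089216 bytes and the CbCr plane
 * 1920 * 1088 / 2 + 256 = 1044736 bytes; the M-suffix formats carry their
 * planes in separate, non-contiguous buffers, so no base-offset macro is
 * needed.
 */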

#define S10B_8B_STRIDE(w)		(__ALIGN_UP((w), 64))
#define S10B_2B_STRIDE(w)		(__ALIGN_UP((((w) + 3) / 4), 16))
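
/*
 * For the S10B formats each plane stores the 8-bit samples at the 8-bit
 * stride followed by the packed 2-bit remainder at the 2-bit stride; e.g.
 * for w = 1920, S10B_8B_STRIDE(1920) = 1920 and S10B_2B_STRIDE(1920) = 480.
 */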

/* Compressed formats */

/* SBWC */
#define __COUNT_BLOCKS(x, a)		(((x) + ((a) - 1)) / (a))

#define SBWC_HEADER_STRIDE_ALIGN	16
#define SBWC_PAYLOAD_STRIDE_ALIGN	64

#define SBWC_BLOCK_WIDTH		32
#define SBWC_BLOCK_HEIGHT		4

#define SBWC_ALIGNED_H(h, a)		__ALIGN_UP((h), (a))

#define SBWC_H_BLOCKS(w)		__COUNT_BLOCKS((w), SBWC_BLOCK_WIDTH)

#define SBWC_8B_STRIDE(w)		(__ALIGN_UP((8 / 2) *			\
						SBWC_BLOCK_WIDTH,		\
						SBWC_PAYLOAD_STRIDE_ALIGN) *	\
						SBWC_H_BLOCKS(w))
#define SBWC_10B_STRIDE(w)		(__ALIGN_UP((10 / 2) *			\
						SBWC_BLOCK_WIDTH,		\
						SBWC_PAYLOAD_STRIDE_ALIGN) *	\
						SBWC_H_BLOCKS(w))
#define SBWC_HEADER_STRIDE(w)		(__ALIGN_UP(__COUNT_BLOCKS((w),		\
						SBWC_BLOCK_WIDTH * 2),		\
						SBWC_HEADER_STRIDE_ALIGN))
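
/*
 * Worked example (w = 1920): 1920 pixels span 60 32-pixel blocks, so
 * SBWC_8B_STRIDE(1920)  = __ALIGN_UP(4 * 32, 64) * 60 = 128 * 60 = 7680,
 * SBWC_10B_STRIDE(1920) = __ALIGN_UP(5 * 32, 64) * 60 = 192 * 60 = 11520,
 * and SBWC_HEADER_STRIDE(1920) = __ALIGN_UP(1920 / 64, 16) = 32 bytes per
 * block row.
 */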

#define SBWC_Y_VSTRIDE_BLOCKS(h, a)	__COUNT_BLOCKS(SBWC_ALIGNED_H(h, a),	\
						SBWC_BLOCK_HEIGHT)
#define SBWC_CBCR_VSTRIDE_BLOCKS(h, a)	__COUNT_BLOCKS(SBWC_ALIGNED_H(h, a) / 2,\
						SBWC_BLOCK_HEIGHT)

/* Height aligned to 16 for H.264 */
#define SBWC_8B_Y_SIZE(w, h)		((SBWC_8B_STRIDE(w) *			\
					  SBWC_Y_VSTRIDE_BLOCKS(h, 16)) + 64)
#define SBWC_8B_CBCR_SIZE(w, h)		((SBWC_8B_STRIDE(w) *			\
					  SBWC_CBCR_VSTRIDE_BLOCKS(h, 16)) + 64)

#define SBWC_8B_Y_HEADER_SIZE(w, h)	((SBWC_HEADER_STRIDE(w) *		\
					  SBWC_Y_VSTRIDE_BLOCKS(h, 16)) + 256)

#define SBWC_8B_CBCR_HEADER_SIZE(w, h)	((SBWC_HEADER_STRIDE(w) *		\
					  SBWC_CBCR_VSTRIDE_BLOCKS(h, 16)) + 128)

/* Height aligned to 8 for H.265 and VP9 */
#define SBWC_10B_Y_SIZE(w, h)		((SBWC_10B_STRIDE(w) *			\
					  SBWC_Y_VSTRIDE_BLOCKS(h, 8)) + 64)
#define SBWC_10B_CBCR_SIZE(w, h)	((SBWC_10B_STRIDE(w) *			\
					  SBWC_CBCR_VSTRIDE_BLOCKS(h, 8)) + 64)
#define SBWC_10B_Y_HEADER_SIZE(w, h)	((SBWC_HEADER_STRIDE(w) *		\
					  SBWC_Y_VSTRIDE_BLOCKS(h, 8)) + 256)
#define SBWC_10B_CBCR_HEADER_SIZE(w, h)	((SBWC_HEADER_STRIDE(w) *		\
					  SBWC_CBCR_VSTRIDE_BLOCKS(h, 8)) + 128)

/* SBWC - single fd */
#define SBWC_8B_CBCR_BASE(base, w, h)					\
	((base) + SBWC_8B_Y_SIZE(w, h) + SBWC_8B_Y_HEADER_SIZE(w, h))
#define SBWC_10B_CBCR_BASE(base, w, h)					\
	((base) + SBWC_10B_Y_SIZE(w, h) + SBWC_10B_Y_HEADER_SIZE(w, h))
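
/*
 * Worked example (1920x1080, 8-bit, H.264 alignment): the 16-aligned height
 * of 1088 covers 272 4-line block rows, so
 * SBWC_8B_Y_SIZE(1920, 1080)        = 7680 * 272 + 64 = 2089024 and
 * SBWC_8B_Y_HEADER_SIZE(1920, 1080) = 32 * 272 + 256  = 8960; in the
 * single-fd layout SBWC_8B_CBCR_BASE() therefore places the CbCr data
 * 2089024 + 8960 = 2097984 bytes past the luma base.
 */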

/* SBWC Lossy */
#define SBWCL_8B_STRIDE(w, r)	(((128 * (r)) / 100) * (((w) + 31) / 32))
#define SBWCL_10B_STRIDE(w, r)	(((160 * (r)) / 100) * (((w) + 31) / 32))

#define SBWCL_8B_Y_SIZE(w, h, r)					\
	((SBWCL_8B_STRIDE(w, r) * ((__ALIGN_UP((h), 16) + 3) / 4)) + 64)
#define SBWCL_8B_CBCR_SIZE(w, h, r)					\
	((SBWCL_8B_STRIDE(w, r) * (((__ALIGN_UP((h), 16) / 2) + 3) / 4)) + 64)

#define SBWCL_10B_Y_SIZE(w, h, r)					\
	((SBWCL_10B_STRIDE(w, r) * ((__ALIGN_UP((h), 16) + 3) / 4)) + 64)
#define SBWCL_10B_CBCR_SIZE(w, h, r)					\
	((SBWCL_10B_STRIDE(w, r) * (((__ALIGN_UP((h), 16) / 2) + 3) / 4)) + 64)

#define SBWCL_8B_CBCR_BASE(base, w, h, r)				\
	((base) + SBWCL_8B_Y_SIZE(w, h, r))
#define SBWCL_10B_CBCR_BASE(base, w, h, r)				\
	((base) + SBWCL_10B_Y_SIZE(w, h, r))
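
/*
 * Here r appears to be the target compression ratio in percent (note the
 * division by 100); that reading is inferred from the arithmetic, not stated
 * by this header. E.g. for 1920x1080 8-bit content at r = 50,
 * SBWCL_8B_STRIDE(1920, 50) = 64 * 60 = 3840 and
 * SBWCL_8B_Y_SIZE(1920, 1080, 50) = 3840 * 272 + 64 = 1044544 bytes,
 * roughly half the uncompressed luma payload.
 */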

/* AFBC */
#define AFBC_8B_STRIDE(w)		__ALIGN_UP((w), 16)
#define AFBC_10B_STRIDE(w)		__ALIGN_UP((w) * 2, 16)

#define AFBC_8B_Y_SIZE(w, h)							\
	((((((w) + 31) / 32) * (((h) + 7) / 8) * 16 + 127) / 128) * 128 +	\
	(((w) + 31) / 32) * (((h) + 7) / 8) * 384)
#define AFBC_10B_Y_SIZE(w, h)							\
	((((((w) + 31) / 32) * (((h) + 7) / 8) * 16 + 127) / 128) * 128 +	\
	(((w) + 31) / 32) * (((h) + 7) / 8) * 512)
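
/*
 * Worked example (1920x1080, 8-bit AFBC): the frame holds 60 * 135 = 8100
 * 32x8 superblocks, so the header area is
 * __ALIGN_UP(8100 * 16, 128) = 129664 bytes and the payload is
 * 8100 * 384 = 3110400 bytes, giving AFBC_8B_Y_SIZE(1920, 1080) = 3240064.
 */
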
#endif /* __LINUX_VIDEODEV2_EXYNOS_MEDIA_H */