/*	$OpenBSD: buf.h,v 1.57 2007/05/28 18:08:47 pedro Exp $	*/
/*	$NetBSD: buf.h,v 1.25 1997/04/09 21:12:17 mycroft Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)buf.h	8.7 (Berkeley) 1/21/94
 */

#ifndef _SYS_BUF_H_
#define	_SYS_BUF_H_
#include <sys/queue.h>

#define	NOLIST	((struct buf *)0x87654321)

struct buf;
struct vnode;

LIST_HEAD(bufhead, buf);

/*
 * To avoid including <ufs/ffs/softdep.h>
 */

LIST_HEAD(workhead, worklist);

/*
 * These are currently used only by the soft dependency code, hence
 * are stored once in a global variable.  If other subsystems wanted
 * to use these hooks, a pointer to a set of bio_ops could be added
 * to each buffer.
 */
extern struct bio_ops {
	void	(*io_start)(struct buf *);
	void	(*io_complete)(struct buf *);
	void	(*io_deallocate)(struct buf *);
	void	(*io_movedeps)(struct buf *, struct buf *);
	int	(*io_countdeps)(struct buf *, int, int);
} bioops;
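
/*
 * Sketch only (handler names are shown for illustration, not as the exact
 * soft dependency API): the soft dependency code fills in the global
 * bioops once at initialization, after which the kernel invokes the hooks
 * through the buf_start()/buf_complete()/... wrappers at the end of this
 * file, e.g.:
 *
 *	bioops.io_start = softdep_disk_io_initiation;
 *	bioops.io_complete = softdep_disk_write_complete;
 *	bioops.io_deallocate = softdep_deallocate_dependencies;
 *	bioops.io_movedeps = softdep_move_dependencies;
 *	bioops.io_countdeps = softdep_count_dependencies;
 */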

/*
 * The buffer header describes an I/O operation in the kernel.
 */
struct buf {
	LIST_ENTRY(buf) b_list;		/* All allocated buffers. */
	LIST_ENTRY(buf) b_hash;		/* Hash chain. */
	LIST_ENTRY(buf) b_vnbufs;	/* Buffer's associated vnode. */
	TAILQ_ENTRY(buf) b_freelist;	/* Free list position if not active. */
	time_t	b_synctime;		/* Time this buffer should be flushed */
	struct	buf *b_actf, **b_actb;	/* Device driver queue when active. */
	struct	proc *b_proc;		/* Associated proc; NULL if kernel. */
	volatile long	b_flags;	/* B_* flags. */
	int	b_error;		/* Errno value. */
	long	b_bufsize;		/* Allocated buffer size. */
	long	b_bcount;		/* Valid bytes in buffer. */
	size_t	b_resid;		/* Remaining I/O. */
	dev_t	b_dev;			/* Device associated with buffer. */
	caddr_t	b_data;			/* Associated data. */
	void	*b_saveaddr;		/* Original b_data for physio. */
	daddr64_t	b_lblkno;	/* Logical block number. */
	daddr64_t	b_blkno;	/* Underlying physical block number. */
	/* Function to call upon completion.
	 * Will be called at splbio(). */
	void	(*b_iodone)(struct buf *);
	struct	vnode *b_vp;		/* Device vnode. */
	int	b_dirtyoff;		/* Offset in buffer of dirty region. */
	int	b_dirtyend;		/* Offset of end of dirty region. */
	int	b_validoff;		/* Offset in buffer of valid region. */
	int	b_validend;		/* Offset of end of valid region. */
	struct	workhead b_dep;		/* List of filesystem dependencies. */
};
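
/*
 * A minimal sketch (not a real driver) of how the fields above cooperate
 * for one asynchronous read: the caller fills in the addressing and size
 * fields, sets B_READ|B_ASYNC|B_CALL, points b_iodone at a callback, and
 * hands the buffer to the device strategy routine; biodone() later runs
 * the callback at splbio().  The callback name here is hypothetical.
 *
 *	bp->b_dev = dev;
 *	bp->b_blkno = blkno;			(physical block on b_dev)
 *	bp->b_bcount = size;			(bytes requested)
 *	bp->b_flags = B_BUSY | B_READ | B_ASYNC | B_CALL;
 *	bp->b_iodone = my_read_done;
 *	(*bdevsw[major(dev)].d_strategy)(bp);
 *
 * On completion, b_resid holds the bytes not transferred, and B_ERROR
 * together with b_error reports a failure.
 */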

/*
 * bufq
 * flexible buffer queue routines
 */
struct bufq {
	void		 (*bufq_free)(struct bufq *);
	void		 (*bufq_add)(struct bufq *, struct buf *);
	struct buf	*(*bufq_get)(struct bufq *);
};

struct bufq_default {
	struct bufq	bufq;
	struct buf	bufq_head[3];
};

#define	BUFQ_ALLOC(_type)	bufq_default_alloc() /* XXX */
#define	BUFQ_FREE(_bufq)	(_bufq)->bufq_free(_bufq)
#define	BUFQ_ADD(_bufq, _bp)	(_bufq)->bufq_add(_bufq, _bp)
#define	BUFQ_GET(_bufq)		(_bufq)->bufq_get(_bufq)

struct bufq	*bufq_default_alloc(void);
void		 bufq_default_free(struct bufq *);
void		 bufq_default_add(struct bufq *, struct buf *);
struct buf	*bufq_default_get(struct bufq *);
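
/*
 * Usage sketch, assuming a hypothetical driver softc with an "sc_bufq"
 * member: the strategy routine queues buffers, and the start routine
 * drains the queue until BUFQ_GET() returns NULL.  (The type argument to
 * BUFQ_ALLOC() is currently ignored, per the XXX above.)
 *
 *	sc->sc_bufq = BUFQ_ALLOC(0);
 *
 *	in the strategy routine:
 *		BUFQ_ADD(sc->sc_bufq, bp);
 *
 *	in the start/completion path:
 *		while ((bp = BUFQ_GET(sc->sc_bufq)) != NULL)
 *			start I/O on bp;
 *
 *	on detach:
 *		BUFQ_FREE(sc->sc_bufq);
 */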

/*
 * For portability with historic industry practice, the cylinder number has
 * to be maintained in the `b_resid' field.
 */
#define	b_cylinder	b_resid		/* Cylinder number for disksort(). */

/* Device driver compatibility definitions. */
#define	b_active	b_bcount	/* Driver queue head: drive active. */
#define	b_errcnt	b_resid		/* Retry count while I/O in progress. */
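
/*
 * Rough sketch of the historic overlay in use (field and helper names are
 * illustrative): a driver that sorts its queue by cylinder fills in
 * b_cylinder, which really lives in b_resid, before handing the buffer to
 * disksort().
 *
 *	bp->b_cylinder = bp->b_blkno / lp->d_secpercyl;
 *	disksort(&sc->sc_queue_head, bp);
 */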

/*
 * These flags are kept in b_flags.
 */
#define	B_AGE		0x00000001	/* Move to age queue when I/O done. */
#define	B_NEEDCOMMIT	0x00000002	/* Needs committing to stable storage */
#define	B_ASYNC		0x00000004	/* Start I/O, do not wait. */
#define	B_BAD		0x00000008	/* Bad block revectoring in progress. */
#define	B_BUSY		0x00000010	/* I/O in progress. */
#define	B_CACHE		0x00000020	/* Bread found us in the cache. */
#define	B_CALL		0x00000040	/* Call b_iodone from biodone. */
#define	B_DELWRI	0x00000080	/* Delay I/O until buffer reused. */
#define	B_DONE		0x00000200	/* I/O completed. */
#define	B_EINTR		0x00000400	/* I/O was interrupted */
#define	B_ERROR		0x00000800	/* I/O error occurred. */
#define	B_INVAL		0x00002000	/* Does not contain valid info. */
#define	B_NOCACHE	0x00008000	/* Do not cache block after use. */
#define	B_PHYS		0x00040000	/* I/O to user memory. */
#define	B_RAW		0x00080000	/* Set by physio for raw transfers. */
#define	B_READ		0x00100000	/* Read buffer. */
#define	B_WANTED	0x00800000	/* Process wants this buffer. */
#define	B_WRITE		0x00000000	/* Write buffer (pseudo flag). */
#define	B_WRITEINPROG	0x01000000	/* Write in progress. */
#define	B_XXX		0x02000000	/* Debugging flag. */
#define	B_DEFERRED	0x04000000	/* Skipped over for cleaning */
#define	B_SCANNED	0x08000000	/* Block already pushed during sync */
#define	B_PDAEMON	0x10000000	/* I/O started by pagedaemon */

#define	B_BITS	"\010\001AGE\002NEEDCOMMIT\003ASYNC\004BAD\005BUSY\006CACHE" \
    "\007CALL\010DELWRI\012DONE\013EINTR\014ERROR" \
    "\016INVAL\020NOCACHE\023PHYS\024RAW\025READ" \
    "\030WANTED\031WRITEINPROG\032XXX\033DEFERRED" \
    "\034SCANNED\035PDAEMON"

/*
 * This structure describes a clustered I/O.  It is stored in the b_saveaddr
 * field of the buffer on which I/O is done.  At I/O completion, the cluster
 * callback uses the structure to parcel I/Os to individual buffers, and
 * then frees this structure.
 */
struct cluster_save {
	long	bs_bcount;		/* Saved b_bcount. */
	long	bs_bufsize;		/* Saved b_bufsize. */
	void	*bs_saveaddr;		/* Saved b_addr. */
	int	bs_nchildren;		/* Number of associated buffers. */
	struct	buf **bs_children;	/* List of associated buffers. */
};
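
/*
 * Simplified sketch of how the cluster completion path uses this record
 * (error propagation and data copying omitted; the malloc type is
 * illustrative): the saved address restores the pseudo buffer, then each
 * child buffer is completed individually and the record is freed.
 *
 *	struct cluster_save *b_save;
 *
 *	b_save = (struct cluster_save *)bp->b_saveaddr;
 *	bp->b_saveaddr = b_save->bs_saveaddr;
 *	for (i = 0; i < b_save->bs_nchildren; i++)
 *		biodone(b_save->bs_children[i]);
 *	free(b_save, M_SEGMENT);
 */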

/*
 * Zero out the buffer's data area.
 */
#define	clrbuf(bp) {							\
	bzero((bp)->b_data, (u_int)(bp)->b_bcount);			\
	(bp)->b_resid = 0;						\
}

/* Flags to low-level allocation routines. */
#define	B_CLRBUF	0x01	/* Request allocated buffer be cleared. */
#define	B_SYNC		0x02	/* Do all allocations synchronously. */

struct cluster_info {
	daddr64_t	ci_lastr;	/* last read (read-ahead) */
	daddr64_t	ci_lastw;	/* last write (write cluster) */
	daddr64_t	ci_cstart;	/* start block of cluster */
	daddr64_t	ci_lasta;	/* last allocation */
	int		ci_clen;	/* length of current cluster */
	int		ci_ralen;	/* read-ahead length */
	daddr64_t	ci_maxra;	/* last readahead block */
};
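
/*
 * A filesystem keeps one of these per in-core inode/vnode to track its
 * sequential read/write state and passes it to cluster_write(), declared
 * in the _KERNEL section below.  Roughly (sketch; "ip" and its fields are
 * hypothetical inode members):
 *
 *	cluster_write(bp, &ip->i_ci, file_size);
 */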

#ifdef _KERNEL
__BEGIN_DECLS
extern int bufpages;		/* Max number of pages for buffers' data */
extern struct pool bufpool;
extern struct bufhead bufhead;

void	bawrite(struct buf *);
void	bdwrite(struct buf *);
void	biodone(struct buf *);
int	biowait(struct buf *);
int	bread(struct vnode *, daddr64_t, int, struct ucred *, struct buf **);
int	breadn(struct vnode *, daddr64_t, int, daddr64_t *, int *, int,
	    struct ucred *, struct buf **);
void	brelse(struct buf *);
void	bremfree(struct buf *);
void	bufinit(void);
void	buf_dirty(struct buf *);
void	buf_undirty(struct buf *);
int	bwrite(struct buf *);
struct buf *getblk(struct vnode *, daddr64_t, int, int, int);
struct buf *geteblk(int);
struct buf *incore(struct vnode *, daddr64_t);
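
/*
 * The canonical buffer-cache calling pattern for the routines above
 * (sketch only, error handling trimmed): read a block, modify it, then
 * either write it synchronously or mark it for delayed write.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, size, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	modify bp->b_data ...
 *	if (sync)
 *		error = bwrite(bp);	(writes and releases the buffer)
 *	else
 *		bdwrite(bp);		(marks B_DELWRI and releases)
 */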

void	minphys(struct buf *bp);
int	physio(void (*strategy)(struct buf *), struct buf *bp, dev_t dev,
	    int flags, void (*minphys)(struct buf *), struct uio *uio);
void	brelvp(struct buf *);
void	reassignbuf(struct buf *);
void	bgetvp(struct vnode *, struct buf *);
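
/*
 * physio() is what character device read/write entry points typically use
 * to do raw I/O straight to user memory (setting B_PHYS/B_RAW on the
 * buffer).  Sketch of a raw read routine for a hypothetical driver "xx";
 * passing a NULL bp lets physio() supply its own buffer:
 *
 *	int
 *	xxread(dev_t dev, struct uio *uio, int flags)
 *	{
 *		return (physio(xxstrategy, NULL, dev, B_READ, minphys, uio));
 *	}
 */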

void	buf_replacevnode(struct buf *, struct vnode *);
void	buf_daemon(struct proc *);
int	bread_cluster(struct vnode *, daddr64_t, int, struct buf **);

#ifdef DEBUG
void	buf_print(struct buf *);
#endif

static __inline void
buf_start(struct buf *bp)
{
	if (bioops.io_start)
		(*bioops.io_start)(bp);
}

static __inline void
buf_complete(struct buf *bp)
{
	if (bioops.io_complete)
		(*bioops.io_complete)(bp);
}

static __inline void
buf_deallocate(struct buf *bp)
{
	if (bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);
}

static __inline void
buf_movedeps(struct buf *bp, struct buf *bp2)
{
	if (bioops.io_movedeps)
		(*bioops.io_movedeps)(bp, bp2);
}

static __inline int
buf_countdeps(struct buf *bp, int i, int islocked)
{
	if (bioops.io_countdeps)
		return ((*bioops.io_countdeps)(bp, i, islocked));
	else
		return (0);
}

void	cluster_write(struct buf *, struct cluster_info *, u_quad_t);

__END_DECLS
#endif /* _KERNEL */
#endif /* !_SYS_BUF_H_ */