@@ -0,0 +1,565 @@
#define __LIBRARY__
/*
 * linux/fs/buffer.c
 *
 * (C) 1991 Linus Torvalds
 */

/*
 * 'buffer.c' implements the buffer-cache functions. Race-conditions have
 * been avoided by NEVER letting an interrupt change a buffer (except for the
 * data, of course), but instead letting the caller do it. NOTE! As interrupts
 * can wake up a caller, some cli-sti sequences are needed to check for
 * sleep-on-calls. These should be extremely quick, though (I hope).
 */

/*
 * NOTE! There is one discordant note here: checking floppies for
 * disk change. This is where it fits best, I think, as it should
 * invalidate changed floppy-disk-caches.
 */
#include <unistd.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/segment.h>

#define BUF_MAX 4096
#define DIRBUF 8192
#define MAX_ARG_PAGES 32
#define NAME_LEN 14

extern void put_super(int dev);
extern void invalidate_inodes(int dev);

extern int end;
struct buffer_head * start_buffer = (struct buffer_head *) &end;
struct buffer_head * hash_table[NR_HASH];
static struct buffer_head * free_list;
static struct task_struct * buffer_wait = NULL;
int NR_BUFFERS = 0;

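/*
 * Wait for a locked buffer to be unlocked.  Interrupts are disabled so
 * that the wake-up cannot slip in between the b_lock test and the
 * sleep_on() call (a missed wake-up); the task switch inside sleep_on()
 * loads the next task's own flags, so interrupts are not left disabled
 * while we sleep.
 */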
static inline void wait_on_buffer(struct buffer_head * bh)
{
	cli();
	while (bh->b_lock)
		sleep_on(&bh->b_wait);
	sti();
}

int sys_sync(void)
{
	int i;
	struct buffer_head * bh;

	sync_inodes();		/* write out inodes into buffers */
	bh = start_buffer;
	for (i=0 ; i<NR_BUFFERS ; i++,bh++) {
		wait_on_buffer(bh);
		if (bh->b_dirt)
			ll_rw_block(WRITE,bh);
	}
	return 0;
}

int sync_dev(int dev)
{
	int i;
	struct buffer_head * bh;

	bh = start_buffer;
	for (i=0 ; i<NR_BUFFERS ; i++,bh++) {
		if (bh->b_dev != dev)
			continue;
		wait_on_buffer(bh);
		if (bh->b_dev == dev && bh->b_dirt)
			ll_rw_block(WRITE,bh);
	}
	sync_inodes();
	bh = start_buffer;
	for (i=0 ; i<NR_BUFFERS ; i++,bh++) {
		if (bh->b_dev != dev)
			continue;
		wait_on_buffer(bh);
		if (bh->b_dev == dev && bh->b_dirt)
			ll_rw_block(WRITE,bh);
	}
	return 0;
}

void inline invalidate_buffers(int dev)
{
	int i;
	struct buffer_head * bh;

	bh = start_buffer;
	for (i=0 ; i<NR_BUFFERS ; i++,bh++) {
		if (bh->b_dev != dev)
			continue;
		wait_on_buffer(bh);
		if (bh->b_dev == dev)
			bh->b_uptodate = bh->b_dirt = 0;
	}
}

/*
 * This routine checks whether a floppy has been changed, and
 * invalidates all buffer-cache-entries in that case. This
 * is a relatively slow routine, so we have to try to minimize using
 * it. Thus it is called only upon a 'mount' or 'open'. This
 * is the best way of combining speed and utility, I think.
 * People changing diskettes in the middle of an operation deserve
 * to lose :-)
 *
 * NOTE! Although currently this is only for floppies, the idea is
 * that any additional removable block-device will use this routine,
 * and that mount/open needn't know that floppies/whatever are
 * special.
 */
void check_disk_change(int dev)
{
	int i;

	if (MAJOR(dev) != 2)
		return;
	if (!floppy_change(dev & 0x03))
		return;
	for (i=0 ; i<NR_SUPER ; i++)
		if (super_block[i].s_dev == dev)
			put_super(super_block[i].s_dev);
	invalidate_inodes(dev);
	invalidate_buffers(dev);
}

#define _hashfn(dev,block) (((unsigned)(dev^block))%NR_HASH)
#define hash(dev,block) hash_table[_hashfn(dev,block)]

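/*
 * Every buffer lives on two structures at once: the hash chain for its
 * (dev,block) pair, used for fast lookup, and the doubly linked circular
 * free list, kept in roughly LRU order.  The two helpers below always
 * update both together, so the lists never disagree.
 */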
static inline void remove_from_queues(struct buffer_head * bh)
{
/* remove from hash-queue */
	if (bh->b_next)
		bh->b_next->b_prev = bh->b_prev;
	if (bh->b_prev)
		bh->b_prev->b_next = bh->b_next;
	if (hash(bh->b_dev,bh->b_blocknr) == bh)
		hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
/* remove from free list */
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("Free block list corrupted");
	bh->b_prev_free->b_next_free = bh->b_next_free;
	bh->b_next_free->b_prev_free = bh->b_prev_free;
	if (free_list == bh)
		free_list = bh->b_next_free;
}

static inline void insert_into_queues(struct buffer_head * bh)
{
/* put at end of free list */
	bh->b_next_free = free_list;
	bh->b_prev_free = free_list->b_prev_free;
	free_list->b_prev_free->b_next_free = bh;
	free_list->b_prev_free = bh;
/* put the buffer in new hash-queue if it has a device */
	bh->b_prev = NULL;
	bh->b_next = NULL;
	if (!bh->b_dev)
		return;
	bh->b_next = hash(bh->b_dev,bh->b_blocknr);
	hash(bh->b_dev,bh->b_blocknr) = bh;
	bh->b_next->b_prev = bh;
}

static struct buffer_head * find_buffer(int dev, int block)
{
	struct buffer_head * tmp;

	for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
		if (tmp->b_dev==dev && tmp->b_blocknr==block)
			return tmp;
	return NULL;
}

/*
 * Why like this, I hear you say... The reason is race-conditions.
 * As we don't lock buffers (unless we are reading them, that is),
 * something might happen to it while we sleep (ie a read-error
 * will force it bad). This shouldn't really happen currently, but
 * the code is ready.
 */
struct buffer_head * get_hash_table(int dev, int block)
{
	struct buffer_head * bh;

	for (;;) {
		if (!(bh=find_buffer(dev,block)))
			return NULL;
		bh->b_count++;
		wait_on_buffer(bh);
		if (bh->b_dev == dev && bh->b_blocknr == block)
			return bh;
		bh->b_count--;
	}
}

/*
 * Ok, this is getblk, and it isn't very clear, again to hinder
 * race-conditions. Most of the code is seldom used, (ie repeating),
 * so it should be much more efficient than it looks.
 *
 * The algorithm is changed: hopefully better, and an elusive bug removed.
 */
#define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
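/*
 * BADNESS ranks how costly a buffer is to reclaim: 0 = clean and
 * unlocked (the ideal victim), 1 = locked, 2 = dirty, 3 = dirty and
 * locked.  getblk() scans the free list for the unused buffer with the
 * lowest badness before falling back to waiting.
 */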
struct buffer_head * getblk(int dev,int block)
{
	struct buffer_head * tmp, * bh;

repeat:
	if (bh = get_hash_table(dev,block))
		return bh;
	tmp = free_list;
	do {
		if (tmp->b_count)
			continue;
		if (!bh || BADNESS(tmp)<BADNESS(bh)) {
			bh = tmp;
			if (!BADNESS(tmp))
				break;
		}
/* and repeat until we find something good */
	} while ((tmp = tmp->b_next_free) != free_list);
	if (!bh) {
		sleep_on(&buffer_wait);
		goto repeat;
	}
	wait_on_buffer(bh);
	if (bh->b_count)
		goto repeat;
	while (bh->b_dirt) {
		sync_dev(bh->b_dev);
		wait_on_buffer(bh);
		if (bh->b_count)
			goto repeat;
	}
/* NOTE!! While we slept waiting for this block, somebody else might */
/* already have added "this" block to the cache. check it */
	if (find_buffer(dev,block))
		goto repeat;
/* OK, FINALLY we know that this buffer is the only one of its kind, */
/* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_uptodate=0;
	remove_from_queues(bh);
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}

void brelse(struct buffer_head * buf)
{
	if (!buf)
		return;
	wait_on_buffer(buf);
	if (!(buf->b_count--))
		panic("Trying to free free buffer");
	wake_up(&buffer_wait);
}

/*
 * bread() reads a specified block and returns the buffer that contains
 * it. It returns NULL if the block was unreadable.
 */
struct buffer_head * bread(int dev,int block)
{
	struct buffer_head * bh;

	if (!(bh=getblk(dev,block)))
		panic("bread: getblk returned NULL\n");
	if (bh->b_uptodate)
		return bh;
	ll_rw_block(READ,bh);
	wait_on_buffer(bh);
	if (bh->b_uptodate)
		return bh;
	brelse(bh);
	return NULL;
}

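/*
 * COPYBLK moves one BLOCK_SIZE (1024 byte) block as 256 longwords with a
 * single 'rep movsl'; bread_page() below uses it to copy each buffer
 * into the destination page.
 */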
#define COPYBLK(from,to) \
__asm__("cld\n\t" \
	"rep\n\t" \
	"movsl\n\t" \
	::"c" (BLOCK_SIZE/4),"S" (from),"D" (to) \
	)

/*
 * bread_page reads four buffers into memory at the desired address. It's
 * a function of its own, as there is some speed to be got by reading them
 * all at the same time, not waiting for one to be read, and then another
 * etc.
 */
void bread_page(unsigned long address,int dev,int b[4])
{
	struct buffer_head * bh[4];
	int i;

	for (i=0 ; i<4 ; i++)
		if (b[i]) {
			if (bh[i] = getblk(dev,b[i]))
				if (!bh[i]->b_uptodate)
					ll_rw_block(READ,bh[i]);
		} else
			bh[i] = NULL;
	for (i=0 ; i<4 ; i++,address += BLOCK_SIZE)
		if (bh[i]) {
			wait_on_buffer(bh[i]);
			if (bh[i]->b_uptodate)
				COPYBLK((unsigned long) bh[i]->b_data,address);
			brelse(bh[i]);
		}
}

/*
 * Ok, breada can be used as bread, but additionally to mark other
 * blocks for reading as well. End the argument list with a negative
 * number.
 */
struct buffer_head * breada(int dev,int first, ...)
{
	va_list args;
	struct buffer_head * bh, *tmp;

	va_start(args,first);
	if (!(bh=getblk(dev,first)))
		panic("bread: getblk returned NULL\n");
	if (!bh->b_uptodate)
		ll_rw_block(READ,bh);
	while ((first=va_arg(args,int))>=0) {
		tmp=getblk(dev,first);
		if (tmp) {
			if (!tmp->b_uptodate)
				ll_rw_block(READA,tmp);	/* read-ahead the extra block, not 'bh' again */
			tmp->b_count--;
		}
	}
	va_end(args);
	wait_on_buffer(bh);
	if (bh->b_uptodate)
		return bh;
	brelse(bh);
	return (NULL);
}

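/*
 * buffer_init() carves the region between the end of the kernel image
 * ('end') and buffer_end into buffer heads growing upwards and 1k data
 * blocks growing downwards.  The 640k-1M area is always skipped, since
 * that hole belongs to the display memory and the BIOS ROM.
 */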
void buffer_init(long buffer_end)
{
	struct buffer_head * h = start_buffer;
	void * b;
	int i;

	if (buffer_end == 1<<20)
		b = (void *) (640*1024);
	else
		b = (void *) buffer_end;
	while ( (b -= BLOCK_SIZE) >= ((void *) (h+1)) ) {
		h->b_dev = 0;
		h->b_dirt = 0;
		h->b_count = 0;
		h->b_lock = 0;
		h->b_uptodate = 0;
		h->b_wait = NULL;
		h->b_next = NULL;
		h->b_prev = NULL;
		h->b_data = (char *) b;
		h->b_prev_free = h-1;
		h->b_next_free = h+1;
		h++;
		NR_BUFFERS++;
		if (b == (void *) 0x100000)
			b = (void *) 0xA0000;
	}
	h--;
	free_list = start_buffer;
	free_list->b_prev_free = h;
	h->b_next_free = free_list;
	for (i=0;i<NR_HASH;i++)
		hash_table[i]=NULL;
}

//mygetdents
/* directory entry record returned to user space by sys_getdents */
struct linux_dirent
{
	long d_ino;
	off_t d_off;
	unsigned short d_reclen;
	char d_name[NAME_LEN+1];
};
int sys_getdents(unsigned int fd, struct linux_dirent* dirp, unsigned int count)
{
	struct file* file;
	struct linux_dirent mydir;
	struct dir_entry* dir;
	struct m_inode* inode;
	struct buffer_head* bh;
	char* buf;
	int i, j, dsize, ldsize;
	int num = 0;

	/* a zero-sized user buffer cannot hold any entry */
	if (!count)
		return -1;
	file = current->filp[fd];		/* open file structure for this fd */
	inode = file->f_inode;			/* inode of the directory */
	bh = bread(inode->i_dev, inode->i_zone[0]);	/* read the directory's first data block */
	dsize = sizeof(struct dir_entry);	/* size of an on-disk directory entry */
	ldsize = sizeof(struct linux_dirent);	/* size of the record copied to user space */
	for (j = 0; j < inode->i_size; j += dsize)
	{
		/* stop when the next record would no longer fit in the user buffer */
		if (num + ldsize >= count) {
			brelse(bh);
			return 0;
		}

		dir = (struct dir_entry*)(bh->b_data + j);
		if (dir->inode)
		{
			mydir.d_ino = dir->inode;
			for (i = 0; i < NAME_LEN; i++)
				mydir.d_name[i] = dir->name[i];
			mydir.d_name[NAME_LEN] = 0;
			mydir.d_off = 0;
			mydir.d_reclen = sizeof(mydir);
			/* copy the record byte by byte into the user-space buffer */
			buf = (char*) &mydir;
			for (i = 0; i < mydir.d_reclen; i++)
			{
				put_fs_byte(*(buf + i), ((char*)dirp) + i + num);
			}
			num += mydir.d_reclen;
		}
		else
			continue;
	}
	brelse(bh);
	return num;
}
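
/*
 * Hypothetical user-space usage, assuming a getdents() wrapper generated
 * with _syscall3 and a syscall number wired into include/unistd.h and the
 * sys_call_table (neither is shown in this file).  Records are fixed-size
 * struct linux_dirent entries:
 *
 *	struct linux_dirent de[32];
 *	int i, fd = open(".", O_RDONLY);
 *	int n = getdents(fd, de, sizeof(de));
 *	for (i = 0; i < n / sizeof(de[0]); i++)
 *		printf("%ld %s\n", de[i].d_ino, de[i].d_name);
 */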

//mygetcwd
typedef struct
{
	int d_fd;
	int d_off;
	int d_size;
	char* d_buf;
} DIR;

struct direct {
	ino_t d_ino;
	char d_name[NAME_LEN];
};

static struct linux_dirent res;
_syscall3(int, read, int, fildes, char*, buf, off_t, count)
_syscall2(int, fstat, int, fildes, struct stat*, stat_buf)
_syscall2(int, stat, const char*, filename, struct stat*, stat_buf)
_syscall1(int, chdir, const char*, filename)
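
/*
 * readdir/opendir/closedir below are small user-style directory helpers
 * implemented inside the kernel for sys_getcwd.  They parse raw on-disk
 * minix directory entries (struct direct) fetched through the read()
 * wrapper above; open() and close() wrappers are assumed to be provided
 * elsewhere.
 */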
struct linux_dirent* readdir(DIR* dir)
{
	struct direct* ptr;

	if (!dir) {
		errno = EBADF;
		return NULL;
	}
	if (!dir->d_buf) {
		if (!(dir->d_buf = malloc(DIRBUF)))
			return NULL;
		dir->d_size = dir->d_off = 0;
	}
	while (1) {
		if (dir->d_size <= dir->d_off) {
			dir->d_off = 0;
			dir->d_size = read(dir->d_fd, dir->d_buf, DIRBUF);
		}
		if (dir->d_size <= 0)
			return NULL;
		ptr = (struct direct*)(dir->d_off + dir->d_buf);
		dir->d_off += sizeof(*ptr);
		if (!ptr->d_ino)
			continue;
		res.d_ino = ptr->d_ino;
		strncpy(res.d_name, ptr->d_name, NAME_LEN);
		res.d_name[NAME_LEN] = 0;
		res.d_reclen = strlen(res.d_name);
		return &res;
	}
}

DIR* opendir(const char* dirname)
{
	int fd;
	struct stat stat_buf;
	DIR* ptr;

	if ((fd = open(dirname, O_RDONLY)) < 0)
		return NULL;
	if (fstat(fd, &stat_buf) < 0 ||
	    !S_ISDIR(stat_buf.st_mode) ||
	    !(ptr = malloc(sizeof(*ptr)))) {
		close(fd);
		return NULL;
	}
	memset(ptr, 0, sizeof(*ptr));
	ptr->d_fd = fd;
	return ptr;
}

int closedir(DIR* dir)
{
	int fd;

	if (!dir) {
		errno = EBADF;
		return -1;
	}
	fd = dir->d_fd;
	free(dir->d_buf);
	free(dir);
	return close(fd);
}
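
/*
 * sys_getcwd builds the path bottom-up: remember the (dev,ino) identity
 * of ".", scan ".." for the entry with the same identity to learn the
 * current component's name, prepend it to the result, then chdir("..")
 * and repeat.  The walk stops when ".." has the same identity as ".",
 * i.e. at the root.  Note that the walk leaves the process's working
 * directory changed as a side effect.
 */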
char* sys_getcwd(char* buf, size_t size) {
	char path[BUF_MAX], cwd[BUF_MAX];
	DIR* dirp;
	struct linux_dirent* dp;
	struct stat state0, state1, state2;
	dev_t dev;
	ino_t ino;
	while (1) {
		/* identity (device, inode) of the current directory */
		if (stat(".", &state0) == 0) {
			dev = state0.st_dev;
			ino = state0.st_ino;
		}
		dirp = opendir("..");
		stat("..", &state2);
		/* ".." names the same directory as "." only at the root: stop there */
		if (state2.st_dev == dev && state2.st_ino == ino)
			break;
		while ((dp = readdir(dirp)) != NULL) {
			/* look the entry up inside the parent directory */
			strcpy(path, "../");
			strcat(path, dp->d_name);
			stat(path, &state1);
			if (dev == state1.st_dev && ino == state1.st_ino) {
				/* found our own name in the parent: prepend it */
				memset(cwd, 0, sizeof(cwd));
				strcat(cwd, "/");
				strcat(cwd, dp->d_name);
				strcat(cwd, buf);
				strncpy(buf, cwd, BUF_MAX);
				break;
			}
		}
		closedir(dirp);
		chdir("..");
	}
	return buf;
}