Fixed: Generate chunked playlist correctly, don't sort <All tracks>
entry and don't return a NULL file pointer and crash.


git-svn-id: svn://svn.rockbox.org/rockbox/trunk@9733 a1c6a512-1295-4272-9138-f99709370657
Miika Pekkarinen 2006-04-19 18:56:59 +00:00
parent 5d9eccd85b
commit f9bfd73a24
2 changed files with 26 additions and 9 deletions
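
The <All tracks> half of the fix comes down to keeping qsort() away from the
special entries that retrieve_entries() places at the head of the list: the
new code counts them in special_entry_count and offsets both the qsort() base
pointer and the element count accordingly. Below is a minimal standalone
sketch of that idea; the entry struct and the plain typed array are
simplifications and not the real tagtree types, which keep entries in a raw
dircache buffer and therefore scale the offset by c->dentry_size.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Simplified stand-in for a tagtree list entry; not the real struct. */
    struct entry {
        char name[32];
    };

    static int compare(const void *p1, const void *p2)
    {
        return strcmp(((const struct entry *)p1)->name,
                      ((const struct entry *)p2)->name);
    }

    int main(void)
    {
        struct entry list[] = {
            { "<All tracks>" },   /* special entry: must stay at the top */
            { "Zebra" },
            { "Alpha" },
            { "Mango" },
        };
        int special_entry_count = 1;
        int current_entry_count = sizeof(list) / sizeof(list[0]);
        int i;

        /* Sort only the tail that follows the special entries. */
        qsort(list + special_entry_count,
              current_entry_count - special_entry_count,
              sizeof(struct entry), compare);

        for (i = 0; i < current_entry_count; i++)
            printf("%s\n", list[i].name);

        return 0;
    }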

apps/tagcache.c

@@ -365,6 +365,9 @@ static long tagcache_get_seek(const struct tagcache_search *tcs,
 #ifdef HAVE_TC_RAMCACHE
     if (tcs->ramsearch)
     {
+        if (hdr->indices[idxid].flag & FLAG_DELETED)
+            return false;
+
         return hdr->indices[idxid].tag_seek[tag];
     }
 #endif
@@ -437,7 +440,11 @@ static bool build_lookup_list(struct tagcache_search *tcs)
     {
         if (tcs->seek_list_count == SEEK_LIST_SIZE)
             break ;
 
+        /* Skip deleted files. */
+        if (hdr->indices[i].flag & FLAG_DELETED)
+            continue;
+
         /* Go through all filters.. */
         for (j = 0; j < tcs->filter_count; j++)
         {
@@ -2145,6 +2152,8 @@ static bool load_tagcache(void)
             {
                 logf("Entry no longer valid.");
                 logf("-> %s", buf);
+                /* FIXME: Properly delete the entry. */
+                hdr->indices[hdr->entry_count[i]].flag |= FLAG_DELETED;
                 continue ;
             }
@@ -2408,7 +2417,7 @@ static void tagcache_thread(void)
 int tagcache_get_progress(void)
 {
-    int total_count = processed_dir_count;
+    int total_count = -1;
 
 #ifdef HAVE_DIRCACHE
     if (dircache_is_enabled())
@@ -2428,6 +2437,11 @@ int tagcache_get_progress(void)
     return processed_dir_count * 100 / total_count;
 }
 
+int tagcache_get_processes_entrycount(void)
+{
+    return processed_dir_count;
+}
+
 void tagcache_start_scan(void)
 {
     queue_post(&tagcache_queue, Q_START_SCAN, 0);
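
The tagcache.c half of the change marks index entries that are found to be
invalid with a FLAG_DELETED bit and skips them in the lookup paths instead of
removing them from the index right away (the FIXME in load_tagcache notes
that proper deletion is still to come). A rough standalone illustration of
that soft-delete pattern follows; the flag value and the index_entry layout
are made up for the example and do not match the real tagcache structures.

    #include <stdio.h>

    /* Made-up flag bit and index layout for illustration only. */
    #define FLAG_DELETED 0x0001

    struct index_entry {
        int  flag;
        long seek;
    };

    static struct index_entry indices[] = {
        { 0,            100 },
        { FLAG_DELETED, 200 },   /* stale entry: flagged, not removed */
        { 0,            300 },
    };

    /* Collect the seek positions of live entries only. */
    static int collect_live(long *out, int max)
    {
        int total = sizeof(indices) / sizeof(indices[0]);
        int count = 0;
        int i;

        for (i = 0; i < total && count < max; i++)
        {
            /* Skip deleted files. */
            if (indices[i].flag & FLAG_DELETED)
                continue;

            out[count++] = indices[i].seek;
        }
        return count;
    }

    int main(void)
    {
        long seeks[8];
        int n = collect_live(seeks, 8);
        int i;

        for (i = 0; i < n; i++)
            printf("%ld\n", seeks[i]);

        return 0;
    }

Leaving stale slots in place avoids rewriting the index on the player; the
cost is the extra flag test in every lookup loop, which is what the
tagcache_get_seek and build_lookup_list hunks above add.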

apps/tagtree.c

@@ -284,7 +284,7 @@ static bool parse_search(struct search_instruction *inst, const char *str)
 }
 
-static struct tagcache_search tcs;
+static struct tagcache_search tcs, tcs2;
 
 static int compare(const void *p1, const void *p2)
 {
@@ -386,6 +386,7 @@ int retrieve_entries(struct tree_context *c, struct tagcache_search *tcs,
     int i;
     int namebufused = 0;
     int total_count = 0;
+    int special_entry_count = 0;
     int extra = c->currextra;
     int tag;
     bool sort = false;
@@ -437,9 +438,11 @@ int retrieve_entries(struct tree_context *c, struct tagcache_search *tcs,
             dptr++;
             current_entry_count++;
         }
-        total_count++;
+        special_entry_count++;
     }
 
+    total_count += special_entry_count;
+
     while (tagcache_get_next(tcs))
     {
         if (total_count++ < offset)
@@ -510,7 +513,9 @@ int retrieve_entries(struct tree_context *c, struct tagcache_search *tcs,
     }
 
     if (sort)
-        qsort(c->dircache, current_entry_count, c->dentry_size, compare);
+        qsort(c->dircache + special_entry_count * c->dentry_size,
+              current_entry_count - special_entry_count,
+              c->dentry_size, compare);
 
     if (!init)
     {
@@ -694,9 +699,6 @@ void tagtree_exit(struct tree_context* c)
     c->currtable = c->table_history[c->dirlevel];
     c->currextra = c->extra_history[c->dirlevel];
     c->firstpos = c->pos_history[c->dirlevel];
-
-    /* Just to be sure when chunked browsing is used. */
-    tagcache_search_finish(&tcs);
 }
 
 int tagtree_get_filename(struct tree_context* c, char *buf, int buflen)
@@ -775,9 +777,10 @@ struct tagentry* tagtree_get_entry(struct tree_context *c, int id)
     /* Load the next chunk if necessary. */
     if (realid >= current_entry_count || realid < 0)
     {
-        if (retrieve_entries(c, &tcs, MAX(0, id - (current_entry_count / 2)),
+        if (retrieve_entries(c, &tcs2, MAX(0, id - (current_entry_count / 2)),
                              false) < 0)
         {
             logf("retrieve failed");
             return NULL;
         }
         realid = id - current_offset;