6.23.8.1. Run the passes

At present we simply run up to the number of passes specified as the limit. This will change when convergence testing is fully implemented.

The pass loop moves the identifier list, the input and output file lists, and the table of contents from the pass frame to the master frame after each pass, so that the data can be read by the next pass (while that pass regenerates the data for the pass after it).

Conceptually, the loop converges when this data is identical on two consecutive passes. However, we do not test this fully yet, because the state information is not complete.
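For illustration only, the intended test might look like the sketch below; find_convergence is a hypothetical helper, not part of the code in this section, and the attribute names follow the hand-off performed by process_pass later on.

  def find_convergence(pass_results):
    # pass_results: an iterable of pass-frame-like objects, in pass order
    previous = None
    for passno, frame in enumerate(pass_results):
      current = (frame.ids, frame.flist, frame.iflist, frame.toc)
      if previous is not None and current == previous:
        return passno          # identical data twice in a row: converged
      previous = current
    return None                # never converged within the pass limit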

To be complete, the test must determine whether the input and output files have changed between passes. This will be done by comparing the file lists and the files' last modification dates between passes.
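A rough sketch of such a check follows; it uses the standard os.path.getmtime, but files_unchanged itself is hypothetical and does not appear in the code below.

  import os

  def files_unchanged(old_mtimes, filenames):
    # old_mtimes maps each filename to the modification time recorded
    # on the previous pass; any difference means we have not converged
    if sorted(filenames) != sorted(old_mtimes):
      return False             # the file list itself changed
    for name in filenames:
      if os.path.getmtime(name) != old_mtimes[name]:
        return False           # the file was rewritten between passes
    return True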

Finally, output to simple files will be ignored. It is also necessary to add user-controlled hooks into the convergence tests, because user scripts can have arbitrary side effects.
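Such hooks might be sketched roughly as follows; the registry and both functions are hypothetical and do not exist in interscript.

  # hypothetical registry: user scripts register predicates that are
  # consulted in addition to the built-in convergence test
  user_convergence_hooks = []

  def register_convergence_hook(hook):
    # hook(passno) should return true only when the user script's
    # side effects have also stabilised
    user_convergence_hooks.append(hook)

  def user_hooks_converged(passno):
    for hook in user_convergence_hooks:
      if not hook(passno):
        return False
    return True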

Start python section to interscript/frames/masterf.py[2/2]
#line 215 "master_frame.ipk"
  def run_passes(self, skiplist):
    #print 'STARTING PASSES'
    for passno in range(self.passes):
      converged = self.process_pass(passno, skiplist)
      if converged: break
    #print 'FINISHED PASSES'
    self.persistent_frames['options']=self.argument_frame.__dict__
    self.persistent_frames['include files']=self.include_files
    self.persistent_frames['converged']=converged
    if self.usecache:
      try:
        #print 'WRITING CACHE'
        cache = self.platform.open(self.cache_name,'w')
        pickle.dump(self.persistent_frames, cache)
        cache.close()
        del cache
      except KeyboardInterrupt: raise
      except:
        print 'Pickle FAILURE saving cache',self.cache_name
      if 'cache' in self.process.trace:
        self.dump_cache()

  def __del__(self):
    if 'frames' in self.process.trace:
      self.process.release_object(self)

  def get_master_frame(self): return self

  def get_persistent_frame(self, seq):
    if not self.persistent_frames.has_key(seq):
      self.persistent_frames[seq]={}
    return self.persistent_frames[seq]

  def set_title(self, title, **trlat):
    self.persistent_frames['title'] = title
    apply(add_translation,(title,),trlat)

  def add_author(self, author, **data):
    if not self.persistent_frames.has_key('authors'):
      self.persistent_frames['authors']={}
    if not self.persistent_frames['authors'].has_key(author):
      self.persistent_frames['authors'][author]={}
    self.persistent_frames['authors'][author].update(data)

  def get_title(self):
    return self.persistent_frames.get('title',None)

  def set_native_language(self, language):
    self.persistent_frames['native_language']=language

  def get_native_language(self):
    return self.persistent_frames.get('native_language','en')

  def set_document_data(self, key, data):
    self.persistent_frames[key]=data

  def get_document_data(self,key):
    return self.persistent_frames.get(key,None)

  def dump_cache(self):
    print '--- CACHE DUMP ------------------------------',
    self.dump_dict(self.persistent_frames, 0)
    print

  def dump_sequence(self,s, level):
    for entry in s[:-1]:
      print
      print ' ' * (level * 2),
      self.dump_entry(entry,level)
      print ',',
    if len(s)>0:
      print
      print ' ' * (level * 2),
      self.dump_entry(s[-1],level)

  def dump_dict(self,d, level):
    keys = d.keys()
    keys.sort()
    for key in keys[:-1]:
      print
      if level == 0: print
      print ' '*(level*2)+str(key),':',
      v = d[key]
      self.dump_entry(v, level)
      print ',',
    if len(keys)>0:
      print
      key = keys[-1]
      print ' '*(level*2)+str(key),':',
      v = d[key]
      self.dump_entry(v, level)

  def dump_entry(self,e,level):
      t = type(e)
      if t is types.DictType:
        print '<dict>',
        self.dump_dict(e,level+1)
      elif t is types.TupleType:
        print '<tuple>',
        self.dump_sequence(e, level+1)
      elif t is types.ListType:
        print '<list>',
        self.dump_sequence(e, level+1)
      else:
        print repr(e),

  def process_pass(self, passno, skiplist):
    curpass = pass_frame(self, passno, skiplist)
    self.ids = curpass.ids        # idlist
    self.ftp_list = curpass.ftp_list # ftp list
    self.flist = curpass.flist    # output file list
    self.iflist = curpass.iflist  # input file list
    self.toc = curpass.toc        # table of contents
    self.include_files = curpass.include_files # include files
    self.classes = curpass.classes # classes
    self.functions = curpass.functions # functions
    self.tests = curpass.tests # tests
    self.section_index = curpass.section_index # section index

    if self.sequence_limit == -1:
      self.sequence_limit = curpass.sequence
    elif self.sequence_limit != curpass.sequence:
      print 'WARNING: SEQUENCE COUNTER DISPARITY BETWEEN PASSES'
    fdict = curpass.fdict
    del curpass
    return self.check_convergence(passno, fdict)

  def check_convergence(self, passno, ds):
      dd = self.fdict

      file_count = 0
      stable_file_count = 0
      unstable_file_count = 0
      new_file_count = 0
      for k in ds.keys():
        file_count = file_count + 1
        #print 'Checking file',file_count,':',k,'Status',ds[k]
        if not dd.has_key(k):
          dd[k]=(ds[k],passno)

        if ds[k]=='original':
          new_file_count = new_file_count + 1
        elif ds[k]=='unchanged':
          stable_file_count = stable_file_count + 1
        else:
          unstable_file_count = unstable_file_count + 1
        if ds[k]!='unchanged' or dd[k][0]!='unchanged':
          dd[k]=(ds[k],passno)
      converged = file_count == stable_file_count
      if converged:
        print 'All',file_count,'output files stable on pass',passno+1,' -- breaking'
      else:
        print 'Pass',passno+1,'status: <not converged>'
        print '  Files    :',file_count
        print '  New      :',new_file_count
        print '  Changed  :',unstable_file_count
        print '  Unchanged:',stable_file_count
      return converged

End python section to interscript/frames/masterf.py[2]