Is it useless to use the Lock() in multiprocess Pool()? [duplicate]

I am having trouble with the multiprocessing module. I am using a Pool of workers with its map method to concurrently analyze lots of files. Each time a file has been processed I would like to have a counter updated so that I can keep track of how many files remain to be processed. Here is sample code:
import os
import multiprocessing

counter = 0

def analyze(file):
    # Analyze the file.
    global counter
    counter += 1
    print(counter)

if __name__ == '__main__':
    files = os.listdir('/some/directory')
    pool = multiprocessing.Pool(4)
    pool.map(analyze, files)
I cannot find a solution for this.

The problem is that the counter variable is not shared between your processes: each separate process is creating its own local instance and incrementing that.
See this section of the documentation for some techniques you can employ to share state between your processes. In your case you might want to share a Value instance between your workers.
Here's a working version of your example (with some dummy input data). Note that it uses global values, which I would really try to avoid in practice:
from multiprocessing import Pool, Value

counter = None

def init(args):
    ''' store the counter for later use '''
    global counter
    counter = args

def analyze_data(args):
    ''' increment the global counter, do something with the input '''
    global counter
    # += operation is not atomic, so we need to get a lock:
    with counter.get_lock():
        counter.value += 1
    print(counter.value)
    return args * 10

if __name__ == '__main__':
    #inputs = os.listdir(some_directory)
    #
    # initialize a cross-process counter and the input lists
    #
    counter = Value('i', 0)
    inputs = [1, 2, 3, 4]
    #
    # create the pool of workers, ensuring each one receives the counter
    # as it starts.
    #
    p = Pool(initializer=init, initargs=(counter,))
    i = p.map_async(analyze_data, inputs, chunksize=1)
    i.wait()
    print(i.get())

Here is a Counter class without the race-condition bug:

import multiprocessing

class Counter(object):
    def __init__(self):
        self.val = multiprocessing.Value('i', 0)

    def increment(self, n=1):
        with self.val.get_lock():
            self.val.value += n

    @property
    def value(self):
        return self.val.value
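A minimal usage sketch (my addition, not from the original answer; the helper names init and work are illustrative): since the underlying Value must be handed to workers at process start-up, pass the Counter through the Pool initializer, the same trick used in the answer above.

import multiprocessing

counter = None

def init(shared_counter):
    # runs once in each worker: stash the shared Counter in a module global
    global counter
    counter = shared_counter

def work(item):
    counter.increment()
    return item

if __name__ == '__main__':
    shared = Counter()
    with multiprocessing.Pool(4, initializer=init, initargs=(shared,)) as pool:
        pool.map(work, range(100))
    print(shared.value)  # expect 100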

An extremely simple example, adapted from jkp's answer:

from multiprocessing import Pool, Value
from time import sleep

counter = Value('i', 0)

def f(x):
    global counter
    with counter.get_lock():
        counter.value += 1
    print("counter.value:", counter.value)
    sleep(1)
    return x

# Note: this relies on workers inheriting `counter` via fork; on platforms
# that spawn workers (e.g. Windows), pass the Value through a Pool
# initializer as in jkp's answer, and guard the Pool creation with
# if __name__ == '__main__'.
with Pool(4) as p:
    r = p.map(f, range(1000*1000))

A faster Counter class that avoids acquiring the built-in lock of Value twice (every access to a synchronized Value's .value takes its internal lock, so a plain RawValue guarded by one explicit Lock does less locking work):

import multiprocessing

class Counter(object):
    def __init__(self, initval=0):
        self.val = multiprocessing.RawValue('i', initval)
        self.lock = multiprocessing.Lock()

    def increment(self):
        with self.lock:
            self.val.value += 1

    @property
    def value(self):
        return self.val.value
https://eli.thegreenplace.net/2012/01/04/shared-counter-with-pythons-multiprocessing
https://docs.python.org/2/library/multiprocessing.html#multiprocessing.sharedctypes.Value
https://docs.python.org/2/library/multiprocessing.html#multiprocessing.sharedctypes.RawValue

Here is a solution to your problem based on a different approach from the one proposed in the other answers. It uses message passing with multiprocessing.Queue objects (instead of shared memory with multiprocessing.Value objects) and the process-safe (atomic) built-in increment and decrement operators += and -= (instead of introducing custom increment and decrement methods), since you asked for it.
First, we define a class Subject for instantiating an object that will be local to the parent process and whose attributes are to be incremented or decremented:
import multiprocessing
class Subject:
def __init__(self):
self.x = 0
self.y = 0
Next, we define a class Proxy for instantiating an object that will be the remote proxy through which the child processes will request the parent process to retrieve or update the attributes of the Subject object. The interprocess communication will use two multiprocessing.Queue attributes, one for exchanging requests and one for exchanging responses. Requests are of the form (sender, action, *args) where sender is the sender name, action is the action name ('get', 'set', 'increment', or 'decrement' the value of an attribute), and args is the argument tuple. Responses are of the form value (to 'get' requests):
class Proxy(Subject):
    def __init__(self, request_queue, response_queue):
        self.__request_queue = request_queue
        self.__response_queue = response_queue

    def _getter(self, target):
        sender = multiprocessing.current_process().name
        self.__request_queue.put((sender, 'get', target))
        return Decorator(self.__response_queue.get())

    def _setter(self, target, value):
        sender = multiprocessing.current_process().name
        action = getattr(value, 'action', 'set')
        self.__request_queue.put((sender, action, target, value))

    @property
    def x(self):
        return self._getter('x')

    @property
    def y(self):
        return self._getter('y')

    @x.setter
    def x(self, value):
        self._setter('x', value)

    @y.setter
    def y(self, value):
        self._setter('y', value)
Then, we define the class Decorator to decorate the int objects returned by the getters of a Proxy object, in order to inform its setters whether the increment or decrement operators += and -= have been used, by adding an action attribute; in that case the setters request an 'increment' or 'decrement' operation instead of a 'set' operation. The increment and decrement operators += and -= call the corresponding augmented assignment special methods __iadd__ and __isub__ if they are defined, and fall back on the binary arithmetic special methods __add__ and __sub__, which are always defined for int objects. For example, proxy.x += value is equivalent to proxy.x = proxy.x.__iadd__(value), which is equivalent to proxy.x = type(proxy).x.__get__(proxy).__iadd__(value), which is equivalent to type(proxy).x.__set__(proxy, type(proxy).x.__get__(proxy).__iadd__(value)):
class Decorator(int):
def __iadd__(self, other):
value = Decorator(other)
value.action = 'increment'
return value
def __isub__(self, other):
value = Decorator(other)
value.action = 'decrement'
return value
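As a standalone sanity check (my addition, not part of the solution), you can see the tagging at work: += returns the delta wrapped in a Decorator carrying the requested action, which is exactly what _setter reads back with getattr(value, 'action', 'set'):

d = Decorator(5)
d += 3                   # calls Decorator.__iadd__, which wraps the delta
print(int(d), d.action)  # prints: 3 increment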
Then, we define the function worker that will be run in the child processes and request the increment and decrement operations:
def worker(proxy):
proxy.x += 1
proxy.y -= 1
Finally, we define a single request queue to send requests to the parent process, and multiple response queues to send responses to the child processes:
if __name__ == '__main__':
subject = Subject()
request_queue = multiprocessing.Queue()
response_queues = {}
processes = []
for index in range(4):
sender = 'child {}'.format(index)
response_queues[sender] = multiprocessing.Queue()
proxy = Proxy(request_queue, response_queues[sender])
process = multiprocessing.Process(
target=worker, args=(proxy,), name=sender)
processes.append(process)
running = len(processes)
for process in processes:
process.start()
while subject.x != 4 or subject.y != -4:
sender, action, *args = request_queue.get()
print(sender, 'requested', action, *args)
if action == 'get':
response_queues[sender].put(getattr(subject, args[0]))
elif action == 'set':
setattr(subject, args[0], args[1])
elif action == 'increment':
setattr(subject, args[0], getattr(subject, args[0]) + args[1])
elif action == 'decrement':
setattr(subject, args[0], getattr(subject, args[0]) - args[1])
for process in processes:
process.join()
The program is guaranteed to terminate when += and -= are process-safe. If you remove process-safety by commenting out the corresponding __iadd__ or __isub__ of Decorator, the program will only terminate by chance: proxy.x += value is equivalent to proxy.x = proxy.x.__iadd__(value), but falls back to proxy.x = proxy.x.__add__(value), i.e. proxy.x = proxy.x + value, when __iadd__ is not defined. The action attribute is then not added, so the setter requests a 'set' operation instead of an 'increment' operation, and one child's 'set' can overwrite another child's update, so the loop condition may never be reached.
Example process-safe session (atomic += and -=):
child 0 requested get x
child 0 requested increment x 1
child 0 requested get y
child 0 requested decrement y 1
child 3 requested get x
child 3 requested increment x 1
child 3 requested get y
child 2 requested get x
child 3 requested decrement y 1
child 1 requested get x
child 2 requested increment x 1
child 2 requested get y
child 2 requested decrement y 1
child 1 requested increment x 1
child 1 requested get y
child 1 requested decrement y 1
Example process-unsafe session (non-atomic += and -=):
child 2 requested get x
child 1 requested get x
child 0 requested get x
child 2 requested set x 1
child 2 requested get y
child 1 requested set x 1
child 1 requested get y
child 2 requested set y -1
child 1 requested set y -1
child 0 requested set x 1
child 0 requested get y
child 0 requested set y -2
child 3 requested get x
child 3 requested set x 2
child 3 requested get y
child 3 requested set y -3 # the program stalls here

A more sophisticated solution, based on lock-free atomic operations, adapted from the example in the atomics library README:
from multiprocessing import Process, shared_memory
import atomics
def fn(shmem_name: str, width: int, n: int) -> None:
shmem = shared_memory.SharedMemory(name=shmem_name)
buf = shmem.buf[:width]
with atomics.atomicview(buffer=buf, atype=atomics.INT) as a:
for _ in range(n):
a.inc()
del buf
shmem.close()
if __name__ == "__main__":
# setup
width = 4
shmem = shared_memory.SharedMemory(create=True, size=width)
buf = shmem.buf[:width]
total = 10_000
# run processes to completion
p1 = Process(target=fn, args=(shmem.name, width, total // 2))
p2 = Process(target=fn, args=(shmem.name, width, total // 2))
p1.start(), p2.start()
p1.join(), p2.join()
# print results and cleanup
with atomics.atomicview(buffer=buf, atype=atomics.INT) as a:
print(f"a[{a.load()}] == total[{total}]")
del buf
shmem.close()
shmem.unlink()
(atomics can be installed via pip install atomics on most major platforms.)

This is a different solution and the simplest to my taste.
The reasoning: you create an empty list and append to it each time your function executes, then print len(list) to check progress. Note that this only counts correctly when the workers share memory, as threads do; with separate processes (as with multiprocessing.Pool below) each worker appends to its own copy of the list, so the count is per-process.
Here is an example based on your code :
import os
import multiprocessing

counter = []

def analyze(file):
    # Analyze the file.
    counter.append(' ')
    print(len(counter))

if __name__ == '__main__':
    files = os.listdir('/some/directory')
    pool = multiprocessing.Pool(4)
    pool.map(analyze, files)
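If you do need the append-and-count idea to work across separate processes, a minimal sketch (my addition, not part of the original answer) is to share the list through a multiprocessing.Manager, at the cost of a round-trip to the manager process on every append:

import multiprocessing

def analyze(args):
    file, counter = args  # the manager list proxy travels with each task
    counter.append(' ')
    print(len(counter))

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    counter = manager.list()
    files = ['a.txt', 'b.txt', 'c.txt', 'd.txt']  # stand-in for os.listdir(...)
    with multiprocessing.Pool(4) as pool:
        pool.map(analyze, [(f, counter) for f in files])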
For future visitors, the hack to add a counter to multiprocessing is as follows (this variant uses a thread pool, so the workers share the counter list):

from multiprocessing.pool import ThreadPool

counter = []

def your_function(url):
    # function/process
    counter.append(' ')  # you can append anything
    return len(counter)

pool = ThreadPool()
result = pool.map(your_function, urls)  # urls: your list of inputs

Hope this helps.

I'm working on a progress bar in PyQt5, so I use a thread and a pool together:
import threading
import multiprocessing as mp
from queue import Queue

def multi(x):
    return x*x

def pooler(q):
    with mp.Pool() as pool:
        count = 0
        for i in pool.imap_unordered(multi, range(100)):
            print(count, i)
            count += 1
            q.put(count)

def main():
    q = Queue()
    t = threading.Thread(target=pooler, args=(q,))
    t.start()
    print('start')
    process = 0
    while process < 100:
        process = q.get()
        print('p', process)

if __name__ == '__main__':
    main()
I put this in a QThread worker and it works with acceptable latency.

Related

multiprocessing and time.perf_counter

I'm trying to learn multiprocessing in Python and I'd like to see how long my program takes to run when using multiprocessing, but I can't understand why time.perf_counter prints two really small numbers (6.00004568696022e-07 and 1.200009137392044e-06) and after that the actual duration of the program in seconds (18.546351400000276). Can you explain why?
Thanks
import time
from multiprocessing import Process

start = time.perf_counter()

def counter(n):
    count = 0
    while count < n:
        count += 1

if __name__ == '__main__':
    a = Process(target=counter, args=(500000000,))
    b = Process(target=counter, args=(500000000,))
    a.start()
    b.start()
    a.join()
    b.join()

print(time.perf_counter() - start)

python coroutine asyncio/ await / aiohttp

New to the asyncio world.
Going straight to the point: I want to make a request (aiohttp) to a site. If waiting for the answer takes longer than N seconds, I want to stop waiting, and then retry, setting a limit on the number of attempts if needed.
async def search_skiping_add(name_search):
    start_time = time()
    async with aiohttp.ClientSession() as session:
        url = f'https://somesitehere.com'
        r = await session.get(url)
        final_time = time()
        result_time = round(final_time - start_time)
        print(result_time)
I know there may be a way to do this synchronously, but it's also an excuse to start using asyncio.
This should give you an idea of how to use async with aiohttp:
from aiohttp import ClientSession, ClientTimeout
from asyncio import gather, create_task, sleep, run
from traceback import format_exc

def returnPartionedList(inputlist: list, x: int = 100) -> list:  # returns inputlist split into x parts, default is 100
    return([inputlist[i:i + x] for i in range(0, len(inputlist), x)])

# You could change validate to an integer and thereby increase the retry count as needed.
async def GetRessource(url: str, session: ClientSession, validate: bool = False) -> dict:
    try:
        async with session.get(url) as response:
            if response.status == 200:
                r: dict = await response.json()  # Set equal to .text() to get results as a string
                return(r)
            else:
                r: str = await response.text()
                if not validate:
                    await sleep(3)  # Sleep for x amount of seconds before retry
                    return(await GetRessource(url, session, True))
                print(f"Error, got response code: {response.status} message: {r}")
    except Exception:
        print(f"General Exception:\n{format_exc()}")
    return({})

async def GetUrls(urls: list) -> list:
    resultsList: list = []
    UrlPartitions: list = returnPartionedList(urls, 20)  # Rate limit to 20 requests per loop
    # The timeout is the total time in seconds to wait before terminating a
    # request; the default is 300 seconds (5 minutes).
    async with ClientSession(timeout=ClientTimeout(total=15)) as session:
        for partition in UrlPartitions:
            partitionTasks: list = [create_task(GetRessource(url, session)) for url in partition]
            resultsList.append(await gather(*partitionTasks, return_exceptions=False))
    return(resultsList)  # Or you can do more processing here before returning

async def main():
    urls: list = ["...", "...", "..."]  # list of urls to get from
    results: list = await GetUrls(urls)
    print(results)

if __name__ == "__main__":
    run(main())
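For the narrower ask in the original question (give up after N seconds and retry a bounded number of times), a minimal sketch could look like the following; fetch_with_retries, n_seconds, and attempts are my names, not part of the answer above:

from asyncio import TimeoutError, run
from aiohttp import ClientSession, ClientTimeout

async def fetch_with_retries(url: str, n_seconds: float = 5, attempts: int = 3) -> str:
    # total= bounds the whole request; aiohttp raises asyncio.TimeoutError when it elapses
    timeout = ClientTimeout(total=n_seconds)
    async with ClientSession(timeout=timeout) as session:
        for attempt in range(1, attempts + 1):
            try:
                async with session.get(url) as response:
                    return await response.text()
            except TimeoutError:
                print(f"attempt {attempt} timed out after {n_seconds}s")
    raise RuntimeError(f"no answer from {url} after {attempts} attempts")

if __name__ == "__main__":
    print(run(fetch_with_retries("https://somesitehere.com")))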

Unable to get arduino serial communication working in wxpython GUI

This is the definition which is used to update the labels in the GUI:
def updateV(self, event):
    """"""
    global v
    ser = serial.Serial(port='COM3', baudrate=9600)
    x = ser.read()  # read one byte
    ser.close()
    print(x)
    if v > 3:
        self.labelOne.SetBackgroundColour('red')
        self.labelOne.SetLabel('Battery Voltage : ' + x)
    else:
        self.labelOne.SetBackgroundColour('white')
        self.labelOne.SetLabel('Battery Voltage : ' + str(v))
    self.Refresh()
This is the simple Arduino code I have been using:
int a;
void setup() {
Serial.begin(9600);// put your setup code here, to run once:
}
void loop() {
a=5;
Serial.println(a);
delay(10);
}
I have been using this definition to update the labels in my GUI. I recently started to set up serial communication in the GUI using that code. Using the mainloop() of the wx library, I thought I could update the 'x' value and get it printed on the GUI. But the GUI window only shows 0.0, even though the Python console prints 5 regularly. Please help! I am pretty new to this.
Your issue is that ser.read() will block. Even if you tweak the timeout of your serial.Serial instance, it will still keep the GUI busy. In that situation I do not know a method to "force" a refresh/wx.Yield(); it simply will not work. The standard solution for blocking calls is to spin up a thread, or to poll regularly (e.g. with wx.Timer). However, I was only able to make threading work. The example is based on wxTerminal in pyserial.
# -*- coding: utf-8 -*-
import wx
import serial
from threading import Thread
ARDUINO_NEWLINE = '\r\n'
class serial_reader(object):
def __init__(self, callback=None):
"""Creates serial reader.
:param callback: callable, gets called when byte on serial arrives.
"""
self.callback = callback
self.thread = None
# Signal if serial is alive and should be read
self.alive = False
def start_reader(self, serial_cfg):
"""Start the receiver thread.
:param serial_cfg: dictionary, gets unpacked to parameters for :class:`serial.Serial`
"""
self.ser_cfg = serial_cfg
self.serial = serial.Serial(**serial_cfg)
# set != None so it will not block for longer than timeout on shutdown
self.serial.timeout = 0.1
self.alive = True
self.thread = Thread(target=self.serial_read)
self.thread.daemon = True
self.thread.start()
    def stop_reader(self):
        """Stop the receiver thread, wait until it is finished."""
        if self.thread is not None:
            # signal no more reads
            self.alive = False
            # wait until thread has finished
            self.thread.join()
            self.thread = None
            # cleanup
            self.serial.close()
    def serial_read(self):
        """Thread that handles the incoming traffic."""
        while self.alive:
            try:
                text = self.serial.read()
                if text and self.callback:
                    # return value to main loop in thread-safe manner
                    wx.CallAfter(self.callback, text)
            except serial.serialutil.SerialException:
                # will happen when Windows goes into sleep mode
                print('serial.serialutil.SerialException')
class ser_frm(wx.Frame):
def __init__(self, *args, **kwds):
wx.Frame.__init__(self, *args, **kwds)
self.txt = wx.TextCtrl(self, -1, '', style=wx.TE_MULTILINE)
class serial_controller(object):
def __init__(self, app):
self.app = app
# buffer for serial data
self.ser_buf = ''
self.frm = ser_frm(None, -1, 'testfrm')
# setup serial configuration
self.serial_cfg = {'port': 'COM4', 'baudrate': 9600}
# When parameter dsrdtr is set to True, the Arduino
# will not reset on serial open, for details see
# http://playground.arduino.cc/Main/DisablingAutoResetOnSerialConnection
self.serial_cfg['dsrdtr'] = True
self.ser_rd = serial_reader(callback=self.on_serial)
tit = 'Arduino on port {port} at baudrate {baudrate}'.format(**self.serial_cfg)
self.frm.SetTitle(tit)
self.ser_rd.start_reader(self.serial_cfg)
self.frm.Show()
self.frm.Bind(wx.EVT_CLOSE, self.on_close)
def on_close(self, evt):
"""Shutdown serial read thread before closing."""
if self.ser_rd.alive:
self.ser_rd.stop_reader()
evt.Skip()
def on_serial(self, text):
"""Handle input from the serial port."""
self.ser_buf += text
if self.ser_buf.endswith(ARDUINO_NEWLINE):
if self.frm.txt.GetInsertionPoint() > 1000:
self.frm.txt.SetValue('')
self.frm.txt.AppendText(self.ser_buf)
self.ser_buf = ''
if __name__ == "__main__":
app = wx.App(redirect=False)
serialctr = serial_controller(app)
app.MainLoop()
EDIT: It is not necessary to tinker with DSR/DTR on Arduinos with USB on chip (e.g. the Arduino micro), so delete the line
self.serial_cfg['dsrdtr'] = True
and it will still work properly.

Reading memory and Access is Denied

I need to access all memory of a running process on my local Windows 7 64-bit machine. I am new to winapi.
Here is my problem: whenever I try to open a process and read its memory, I get an "Access is Denied" error.
I searched and found something: it is said that if I run the main process as administrator and use PROCESS_ALL_ACCESS in OpenProcess, I would have enough rights to do it. OK, I did that, but nothing changed. On reading memory, I still get Access is Denied.
So I kept searching and found another thing, which is enabling SeDebugPrivilege. I have also done that, but nothing changed. I still get the error.
I've read the question and its answer here:
Windows Vista/Win7 Privilege Problem: SeDebugPrivilege & OpenProcess.
But as I said, I am really new to winapi and I could not solve my problem yet. Is there anything I need to configure in my operating system?
Here is my Python code with pywin32:
from _ctypes import byref, sizeof, Structure
from ctypes import windll, WinError, c_buffer, c_void_p, create_string_buffer
from ctypes.wintypes import *
import win32security
import win32api
import gc
import ntsecuritycon
from struct import Struct
from win32con import PROCESS_ALL_ACCESS
from struct import calcsize
MEMORY_STATES = {0x1000: "MEM_COMMIT", 0x10000: "MEM_FREE", 0x2000: "MEM_RESERVE"}
MEMORY_PROTECTIONS = {0x10: "PAGE_EXECUTE", 0x20: "PAGE_EXECUTE_READ", 0x40: "PAGE_EXECUTE_READWRITE",
0x80: "PAGE_EXECUTE_WRITECOPY", 0x01: "PAGE_NOACCESS", 0x04: "PAGE_READWRITE",
0x08: "PAGE_WRITECOPY"}
MEMORY_TYPES = {0x1000000: "MEM_IMAGE", 0x40000: "MEM_MAPPED", 0x20000: "MEM_PRIVATE"}
class MEMORY_BASIC_INFORMATION(Structure):
_fields_ = [
("BaseAddress", c_void_p),
("AllocationBase", c_void_p),
("AllocationProtect", DWORD),
("RegionSize", UINT),
("State", DWORD),
("Protect", DWORD),
("Type", DWORD)
]
class SYSTEM_INFO(Structure):
_fields_ = [("wProcessorArchitecture", WORD),
("wReserved", WORD),
("dwPageSize", DWORD),
("lpMinimumApplicationAddress", DWORD),
("lpMaximumApplicationAddress", DWORD),
("dwActiveProcessorMask", DWORD),
("dwNumberOfProcessors", DWORD),
("dwProcessorType", DWORD),
("dwAllocationGranularity", DWORD),
("wProcessorLevel", WORD),
("wProcessorRevision", WORD)]
class PyMEMORY_BASIC_INFORMATION:
def __init__(self, MBI):
self.MBI = MBI
self.set_attributes()
def set_attributes(self):
self.BaseAddress = self.MBI.BaseAddress
self.AllocationBase = self.MBI.AllocationBase
self.AllocationProtect = MEMORY_PROTECTIONS.get(self.MBI.AllocationProtect, self.MBI.AllocationProtect)
self.RegionSize = self.MBI.RegionSize
self.State = MEMORY_STATES.get(self.MBI.State, self.MBI.State)
# self.Protect = self.MBI.Protect # uncomment this and comment next line if you want to do a bitwise check on Protect.
self.Protect = MEMORY_PROTECTIONS.get(self.MBI.Protect, self.MBI.Protect)
self.Type = MEMORY_TYPES.get(self.MBI.Type, self.MBI.Type)
ASSUME_ALIGNMENT = True
class TARGET:
"""Given a ctype (initialized or not) this coordinates all the information needed to read, write and compare."""
def __init__(self, ctype):
self.alignment = 1
self.ctype = ctype
# size of target data
self.size = sizeof(ctype)
self.type = ctype._type_
# get the format type needed for struct.unpack/pack.
while hasattr(self.type, "_type_"):
self.type = self.type._type_
# string_buffers and char arrays have _type_ 'c'
# but that makes it slightly slower to unpack
# so swap is for 's'.
if self.type == "c":
self.type = "s"
# calculate byte alignment. this speeds up scanning substantially
# because we can read and compare every alignment bytes
# instead of every single byte.
# although if we are scanning for a string the alignment is defaulted to 1 \
# (im not sure if this is correct).
elif ASSUME_ALIGNMENT:
# calc alignment
divider = 1
for i in xrange(4):
divider *= 2
if not self.size % divider:
self.alignment = divider
# size of target ctype.
self.type_size = calcsize(self.type)
# length of target / array length.
self.length = self.size / self.type_size
self.value = getattr(ctype, "raw", ctype.value)
# the format string used for struct.pack/unpack.
self.format = str(self.length) + self.type
# efficient packer / unpacker for our own format.
self.packer = Struct(self.format)
def get_packed(self):
"""Gets the byte representation of the ctype value for use with WriteProcessMemory."""
return self.packer.pack(self.value)
def __str__(self):
return str(self.ctype)[:10] + "..." + " <" + str(self.value)[:10] + "..." + ">"
class Memory(object):
def __init__(self, process_handle, target):
self._process_handle = process_handle
self._target = target
self.found = []
self.__scann_process()
def __scann_process(self):
"""scans a processes pages for the target value."""
si = SYSTEM_INFO()
psi = byref(si)
windll.kernel32.GetSystemInfo(psi)
base_address = si.lpMinimumApplicationAddress
max_address = si.lpMaximumApplicationAddress
page_address = base_address
while page_address < max_address:
page_address = self.__scan_page(page_address)
if len(self.found) >= 60000000:
print("[Warning] Scan ended early because too many addresses were found to hold the target data.")
break
gc.collect()
return self.found
def __scan_page(self, page_address):
"""Scans the entire page for TARGET instance and returns the next page address and found addresses."""
information = self.VirtualQueryEx(page_address)
base_address = information.BaseAddress
region_size = information.RegionSize
next_region = base_address + region_size
size = self._target.size
target_value = self._target.value
step = self._target.alignment
unpacker = self._target.packer.unpack
if information.Type != "MEM_PRIVATE" or \
region_size < size or \
information.State != "MEM_COMMIT" or \
information.Protect not in ["PAGE_EXECUTE_READ", "PAGE_EXECUTE_READWRITE", "PAGE_READWRITE"]:
return next_region
page_bytes = self.ReadMemory(base_address, region_size)
for i in xrange(0, (region_size - size), step):
partial = page_bytes[i:i + size]
if unpacker(partial)[0] == target_value:
self.found.append(base_address + i)
del page_bytes # free the buffer
return next_region
def ReadMemory(self, address, size):
cbuffer = c_buffer(size)
success = windll.kernel32.ReadProcessMemory(
self._process_handle,
address,
cbuffer,
size,
0)
assert success, "ReadMemory Failed with success == %s and address == %s and size == %s.\n%s" % (
success, address, size, WinError(win32api.GetLastError()))
return cbuffer.raw
def VirtualQueryEx(self, address):
MBI = MEMORY_BASIC_INFORMATION()
MBI_pointer = byref(MBI)
size = sizeof(MBI)
success = windll.kernel32.VirtualQueryEx(
self._process_handle,
address,
MBI_pointer,
size)
assert success, "VirtualQueryEx Failed with success == %s.\n%s" % (
success, WinError(win32api.GetLastError())[1])
assert success == size, "VirtualQueryEx Failed because not all data was written."
return PyMEMORY_BASIC_INFORMATION(MBI)
def AdjustPrivilege(priv):
flags = win32security.TOKEN_ADJUST_PRIVILEGES | win32security.TOKEN_QUERY
p = win32api.GetCurrentProcess()
htoken = win32security.OpenProcessToken(p, flags)
id = win32security.LookupPrivilegeValue(None, priv)
newPrivileges = [(id, win32security.SE_PRIVILEGE_ENABLED)]
win32security.AdjustTokenPrivileges(htoken, 0, newPrivileges)
win32api.CloseHandle(htoken)
def OpenProcess(pid=win32api.GetCurrentProcessId()):
# ntsecuritycon.SE_DEBUG_NAME = "SeDebugPrivilege"
AdjustPrivilege(ntsecuritycon.SE_DEBUG_NAME)
phandle = windll.kernel32.OpenProcess( \
PROCESS_ALL_ACCESS,
0,
pid)
assert phandle, "Failed to open process!\n%s" % WinError(win32api.GetLastError())[1]
return phandle
PID = 22852
process_handle = OpenProcess(PID)
Memory(process_handle, TARGET(create_string_buffer("1456")))
Here is the error I always get;
AssertionError: ReadMemory Failed with success == 0 and address == 131072 and size == 4096.
[Error 5] Access is denied.
I do not know what other information about my code or my Windows 7 system I should provide. If you need to know more, please ask and I will provide it.
I guess this is about a lack of configuration in my operating system, not about pywin32. I'll be waiting for your solutions.

generating TTT game tree in lua

I am attempting to write a tic-tac-toe game in Lua, and plan on using the minimax algorithm to decide non-human moves. The first step involves generating a tree of all possible board states from a single input state. I am trying to do this recursively, but cannot seem to figure out how. (I think) I understand conceptually how this should be done, but am having trouble implementing it in Lua.
I am trying to structure my tree in the following manner. Each node is a table with two fields:
{ config = {}, children = {} }
config is a list of integers (0, 1, 2) representing empty, X, and O, and defines a TTT board state. children is a list of nodes, which are all possible board states one move away from the current node.
Here is my function that I currently have to build the game tree
function tree_builder(board, player)
supertemp = {}
for i in ipairs(board.config) do
--iterate through the current board state.
--for each empty location create a new node
--representing a possible board state
if board.config[i] == 0 then
temp = {config = {}, children = {}}
for j in ipairs(board.config) do
temp.config[j] = board.config[j]
end
temp.config[i] = player
temp.children = tree_builder(temp, opposite(player))
supertemp[i] = temp
end
end
return supertemp
end
The function is called in the following manner:
start_board = {config = {1,0,0,0}, children = {} }
start_board.children = tree_builder(start_board, 1)
When I comment out the recursive element of the function (the line temp.children = tree_builder(temp, opposite(player))) and only generate the first level of children, the output is correct when printed via code that is conceptually identical to this (I am using love2D so formatting is different):
for i in pairs(start_board.children) do
    print(start_board.children[i].config)
end
The three children are:
1,1,0,0
1,0,1,0
1,0,0,1
However, once I add the recursive element, the following is output for the same three children
1,1,2,1
1,1,2,1
1,1,2,1
I have been searching online for help, and most of what I have found is either conceptual in nature or implemented in different languages. I believe I have implemented the recursive element wrongly, but cannot wrap my head around why.
I don't understand what opposite(player) means in temp.children = tree_builder(temp, opposite(player)).
Notice that a recursion needs an end condition.
Also note the likely cause of your corrupted output: supertemp and temp in tree_builder are assigned without local, so they are globals; each recursive call overwrites the caller's temp while the caller is still filling it in. Declaring them local supertemp = {} and local temp inside the function avoids that.
This is my solution under your structure:
local COL = 3
local ROW = 3
local function printBoard( b )
local output = ""
local i = 1
for _,v in ipairs(b.config) do
output = output .. v .. ( (i % COL == 0) and '\n' or ',' )
i = i + 1
end
print( output )
end
local function shallowCopy( t )
local t2 = {}
for k,v in pairs(t) do
t2[k] = v
end
return t2
end
local MAX_STEP = COL * ROW
local ING = 0
local P1 = 1
local P2 = 2
local TIE = 3
local STATUS = { [P1] = "P1 Win", [P2] = "P2 Win", [TIE] = "Tied" }
local start_board = { config = {P1,0,0,0,0,0,0,0,0}, children = {} }
local function checkIfOver( board, step )
local config = board.config
local over = false
--check rows
for i=0,ROW-1 do
over = true
for j=1,COL do
if 0 == config[i*COL+1] or config[i*COL+j] ~= config[i*COL+1] then
over = false
end
end
if over then
return config[i*COL+1]
end
end
--check cols
for i=1,COL do
over = true
for j=0,ROW-1 do
if 0 == config[i] or config[i] ~= config[i+COL*j] then
over = false
end
end
if over then
return config[i]
end
end
--check diagonals
if config[1] ~= 0 and config[1] == config[5] and config[5] == config[9] then
return config[1]
end
if config[3] ~=0 and config[3] == config[5] and config[5] == config[7] then
return config[3]
end
if step >= MAX_STEP then
return TIE
else
return ING
end
end
local function treeBuilder( board, step )
--check the game is over
local over = checkIfOver( board, step )
if over ~= ING then
printBoard( board )
print( STATUS[over], '\n---------\n' )
return
end
local child
local childCfg
for i,v in ipairs(board.config) do
if 0 == v then
child = { config = {}, children = {} }
childCfg = shallowCopy( board.config )
childCfg[i] = (step % 2 == 0) and P1 or P2
child.config = childCfg
table.insert( board.children, child )
treeBuilder( child, step + 1 )
end
end
end
treeBuilder( start_board, 1 )
